author     Gordon Ross <gordon.w.ross@gmail.com>  2016-10-09 15:30:00 -0400
committer  Gordon Ross <gordon.w.ross@gmail.com>  2016-11-19 16:39:27 -0500
commit     47dc10d701a66963400f0945b9706bc0818816b5 (patch)
tree       7a05b5dfa99d5a6ded180cff1c7a8451b4d434c3
parent     6a64e34abcc9c88458d0cd2a916c3c6bfda3c4c1 (diff)
download   illumos-gfx-drm-47dc10d701a66963400f0945b9706bc0818816b5.tar.gz
Import kernel drm code from https://hg.java.net/hg/solaris-x11~x-s12-clone
-rw-r--r--  usr/src/cmd/mdb/i915/i915.c | 2302
-rw-r--r--  usr/src/pkg/manifests/driver-graphics-drm.p5m | 145
-rw-r--r--  usr/src/uts/common/drm/drm.h | 928
-rw-r--r--  usr/src/uts/common/drm/drmP.h | 1606
-rw-r--r--  usr/src/uts/common/drm/drm_atomic.h (renamed from usr/src/uts/common/io/drm/drm_atomic.h) | 18
-rw-r--r--  usr/src/uts/common/drm/drm_core.h | 49
-rw-r--r--  usr/src/uts/common/drm/drm_crtc.h | 1068
-rw-r--r--  usr/src/uts/common/drm/drm_crtc_helper.h | 176
-rw-r--r--  usr/src/uts/common/drm/drm_dp_helper.h | 368
-rw-r--r--  usr/src/uts/common/drm/drm_edid.h | 295
-rw-r--r--  usr/src/uts/common/drm/drm_fb_helper.h | 121
-rw-r--r--  usr/src/uts/common/drm/drm_fourcc.h | 229
-rw-r--r--  usr/src/uts/common/drm/drm_io32.h (renamed from usr/src/uts/common/io/drm/drm_io32.h) | 74
-rw-r--r--  usr/src/uts/common/drm/drm_linux.h | 210
-rw-r--r--  usr/src/uts/common/drm/drm_linux_list.h | 165
-rw-r--r--  usr/src/uts/common/drm/drm_mm.h | 242
-rw-r--r--  usr/src/uts/common/drm/drm_mode.h | 559
-rw-r--r--  usr/src/uts/common/drm/drm_os_solaris.h | 119
-rw-r--r--  usr/src/uts/common/drm/drm_pciids.h (renamed from usr/src/uts/intel/io/drm/drm_pciids.h) | 35
-rw-r--r--  usr/src/uts/common/drm/drm_rect.h | 167
-rw-r--r--  usr/src/uts/common/drm/drm_sarea.h (renamed from usr/src/uts/common/io/drm/drm_sarea.h) | 45
-rw-r--r--  usr/src/uts/common/drm/drm_sun_i2c.h | 75
-rw-r--r--  usr/src/uts/common/drm/drm_sun_idr.h | 90
-rw-r--r--  usr/src/uts/common/drm/drm_sun_pci.h | 84
-rw-r--r--  usr/src/uts/common/drm/drm_sun_timer.h | 56
-rw-r--r--  usr/src/uts/common/drm/drm_sun_workqueue.h | 53
-rw-r--r--  usr/src/uts/common/drm/drm_sunmod.h | 118
-rw-r--r--  usr/src/uts/common/drm/i915_drm.h (renamed from usr/src/uts/intel/io/drm/i915_drm.h) | 609
-rw-r--r--  usr/src/uts/common/io/drm/LICENSE_DRM | 1349
-rw-r--r--  usr/src/uts/common/io/drm/LICENSE_DRM.descrip (renamed from usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip) | 0
-rw-r--r--  usr/src/uts/common/io/drm/THIRDPARTYLICENSE | 314
-rw-r--r--  usr/src/uts/common/io/drm/ati_pcigart.c | 38
-rw-r--r--  usr/src/uts/common/io/drm/drm.h | 865
-rw-r--r--  usr/src/uts/common/io/drm/drmP.h | 1103
-rw-r--r--  usr/src/uts/common/io/drm/drm_agpsupport.c | 796
-rw-r--r--  usr/src/uts/common/io/drm/drm_auth.c | 236
-rw-r--r--  usr/src/uts/common/io/drm/drm_bufs.c | 1462
-rw-r--r--  usr/src/uts/common/io/drm/drm_cache.c | 11
-rw-r--r--  usr/src/uts/common/io/drm/drm_context.c | 660
-rw-r--r--  usr/src/uts/common/io/drm/drm_crtc.c | 3944
-rw-r--r--  usr/src/uts/common/io/drm/drm_crtc_helper.c | 1084
-rw-r--r--  usr/src/uts/common/io/drm/drm_dma.c | 166
-rw-r--r--  usr/src/uts/common/io/drm/drm_dp_helper.c | 147
-rw-r--r--  usr/src/uts/common/io/drm/drm_dp_i2c_helper.c | 207
-rw-r--r--  usr/src/uts/common/io/drm/drm_drawable.c | 74
-rw-r--r--  usr/src/uts/common/io/drm/drm_drv.c | 1013
-rw-r--r--  usr/src/uts/common/io/drm/drm_edid.c | 2995
-rw-r--r--  usr/src/uts/common/io/drm/drm_fb_helper.c | 958
-rw-r--r--  usr/src/uts/common/io/drm/drm_fops.c | 522
-rw-r--r--  usr/src/uts/common/io/drm/drm_gem.c | 875
-rw-r--r--  usr/src/uts/common/io/drm/drm_io32.c | 386
-rw-r--r--  usr/src/uts/common/io/drm/drm_ioctl.c | 581
-rw-r--r--  usr/src/uts/common/io/drm/drm_irq.c | 1634
-rw-r--r--  usr/src/uts/common/io/drm/drm_kstat.c | 46
-rw-r--r--  usr/src/uts/common/io/drm/drm_linux.c | 72
-rw-r--r--  usr/src/uts/common/io/drm/drm_linux_list.h | 99
-rw-r--r--  usr/src/uts/common/io/drm/drm_lock.c | 332
-rw-r--r--  usr/src/uts/common/io/drm/drm_memory.c | 222
-rw-r--r--  usr/src/uts/common/io/drm/drm_mm.c | 887
-rw-r--r--  usr/src/uts/common/io/drm/drm_modes.c | 1140
-rw-r--r--  usr/src/uts/common/io/drm/drm_msg.c | 51
-rw-r--r--  usr/src/uts/common/io/drm/drm_pci.c | 180
-rw-r--r--  usr/src/uts/common/io/drm/drm_rect.c | 285
-rw-r--r--  usr/src/uts/common/io/drm/drm_scatter.c | 169
-rw-r--r--  usr/src/uts/common/io/drm/drm_stub.c | 3
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_i2c.c | 496
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_idr.c | 488
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_pci.c | 219
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_timer.c | 90
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_workqueue.c | 93
-rw-r--r--  usr/src/uts/common/io/drm/drm_sunmod.c | 1372
-rw-r--r--  usr/src/uts/common/io/drm/drm_sunmod.h | 160
-rw-r--r--  usr/src/uts/common/io/drm/drm_sysfs.c | 141
-rw-r--r--  usr/src/uts/common/io/drm/queue.h | 585
-rw-r--r--  usr/src/uts/intel/io/drm/i915_dma.c | 1146
-rw-r--r--  usr/src/uts/intel/io/drm/i915_drv.c | 1047
-rw-r--r--  usr/src/uts/intel/io/drm/i915_drv.h | 1842
-rw-r--r--  usr/src/uts/intel/io/drm/i915_gem.c | 2919
-rw-r--r--  usr/src/uts/intel/io/drm/i915_gem_debug.c | 1108
-rw-r--r--  usr/src/uts/intel/io/drm/i915_gem_tiling.c | 390
-rw-r--r--  usr/src/uts/intel/io/drm/i915_irq.c | 1052
-rw-r--r--  usr/src/uts/intel/io/drm/i915_mem.c | 425
-rw-r--r--  usr/src/uts/intel/io/i915/LICENSE_I915 | 1432
-rw-r--r--  usr/src/uts/intel/io/i915/LICENSE_I915.descrip | 1
-rw-r--r--  usr/src/uts/intel/io/i915/dvo.h | 158
-rw-r--r--  usr/src/uts/intel/io/i915/dvo_ch7017.c | 422
-rw-r--r--  usr/src/uts/intel/io/i915/dvo_ch7xxx.c | 376
-rw-r--r--  usr/src/uts/intel/io/i915/dvo_ivch.c | 442
-rw-r--r--  usr/src/uts/intel/io/i915/dvo_ns2501.c | 596
-rw-r--r--  usr/src/uts/intel/io/i915/dvo_sil164.c | 289
-rw-r--r--  usr/src/uts/intel/io/i915/dvo_tfp410.c | 330
-rw-r--r--  usr/src/uts/intel/io/i915/i915_dma.c | 1823
-rw-r--r--  usr/src/uts/intel/io/i915/i915_drv.c | 1465
-rw-r--r--  usr/src/uts/intel/io/i915/i915_drv.h | 2046
-rw-r--r--  usr/src/uts/intel/io/i915/i915_gem.c | 3988
-rw-r--r--  usr/src/uts/intel/io/i915/i915_gem_context.c | 608
-rw-r--r--  usr/src/uts/intel/io/i915/i915_gem_debug.c | 952
-rw-r--r--  usr/src/uts/intel/io/i915/i915_gem_evict.c | 186
-rw-r--r--  usr/src/uts/intel/io/i915/i915_gem_execbuffer.c | 1192
-rw-r--r--  usr/src/uts/intel/io/i915/i915_gem_gtt.c | 1180
-rw-r--r--  usr/src/uts/intel/io/i915/i915_gem_stolen.c | 412
-rw-r--r--  usr/src/uts/intel/io/i915/i915_gem_tiling.c | 522
-rw-r--r--  usr/src/uts/intel/io/i915/i915_io32.c | 109
-rw-r--r--  usr/src/uts/intel/io/i915/i915_io32.h | 41
-rw-r--r--  usr/src/uts/intel/io/i915/i915_irq.c | 3657
-rw-r--r--  usr/src/uts/intel/io/i915/i915_reg.h | 5371
-rw-r--r--  usr/src/uts/intel/io/i915/i915_suspend.c | 439
-rw-r--r--  usr/src/uts/intel/io/i915/i915_ums.c | 503
-rw-r--r--  usr/src/uts/intel/io/i915/intel_bios.c | 759
-rw-r--r--  usr/src/uts/intel/io/i915/intel_bios.h | 699
-rw-r--r--  usr/src/uts/intel/io/i915/intel_crt.c | 825
-rw-r--r--  usr/src/uts/intel/io/i915/intel_ddi.c | 1388
-rw-r--r--  usr/src/uts/intel/io/i915/intel_display.c | 10275
-rw-r--r--  usr/src/uts/intel/io/i915/intel_dp.c | 3265
-rw-r--r--  usr/src/uts/intel/io/i915/intel_drv.h | 806
-rw-r--r--  usr/src/uts/intel/io/i915/intel_dvo.c | 557
-rw-r--r--  usr/src/uts/intel/io/i915/intel_fb.c | 189
-rw-r--r--  usr/src/uts/intel/io/i915/intel_hdmi.c | 1274
-rw-r--r--  usr/src/uts/intel/io/i915/intel_i2c.c | 618
-rw-r--r--  usr/src/uts/intel/io/i915/intel_lvds.c | 1114
-rw-r--r--  usr/src/uts/intel/io/i915/intel_modes.c | 131
-rw-r--r--  usr/src/uts/intel/io/i915/intel_overlay.c | 1533
-rw-r--r--  usr/src/uts/intel/io/i915/intel_panel.c | 738
-rw-r--r--  usr/src/uts/intel/io/i915/intel_pm.c | 5177
-rw-r--r--  usr/src/uts/intel/io/i915/intel_ringbuffer.c | 2091
-rw-r--r--  usr/src/uts/intel/io/i915/intel_ringbuffer.h | 312
-rw-r--r--  usr/src/uts/intel/io/i915/intel_sdvo.c | 2961
-rw-r--r--  usr/src/uts/intel/io/i915/intel_sdvo_regs.h | 777
-rw-r--r--  usr/src/uts/intel/io/i915/intel_sideband.c | 177
-rw-r--r--  usr/src/uts/intel/io/i915/intel_sprite.c | 1081
-rw-r--r--  usr/src/uts/intel/io/i915/intel_tv.c | 1683
-rw-r--r--  usr/src/uts/intel/io/radeon/r300_cmdbuf.c (renamed from usr/src/uts/intel/io/drm/r300_cmdbuf.c) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/r300_reg.h (renamed from usr/src/uts/intel/io/drm/r300_reg.h) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/radeon_cp.c (renamed from usr/src/uts/intel/io/drm/radeon_cp.c) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/radeon_drm.h (renamed from usr/src/uts/intel/io/drm/radeon_drm.h) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/radeon_drv.c (renamed from usr/src/uts/intel/io/drm/radeon_drv.c) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/radeon_drv.h (renamed from usr/src/uts/intel/io/drm/radeon_drv.h) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/radeon_io32.h (renamed from usr/src/uts/intel/io/drm/radeon_io32.h) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/radeon_irq.c (renamed from usr/src/uts/intel/io/drm/radeon_irq.c) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/radeon_mem.c (renamed from usr/src/uts/intel/io/drm/radeon_mem.c) | 0
-rw-r--r--  usr/src/uts/intel/io/radeon/radeon_state.c (renamed from usr/src/uts/intel/io/drm/radeon_state.c) | 0
141 files changed, 97594 insertions, 17861 deletions
diff --git a/usr/src/cmd/mdb/i915/i915.c b/usr/src/cmd/mdb/i915/i915.c
new file mode 100644
index 0000000..706b937
--- /dev/null
+++ b/usr/src/cmd/mdb/i915/i915.c
@@ -0,0 +1,2302 @@
+/*
+ * Copyright (c) 2012, 2013, Intel Corporation.
+ * All Rights Reserved.
+ */
+/*
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/mdb_modapi.h>
+#include <sys/proc.h>
+
+#include <sys/drm/drmP.h>
+#include <i915/src/i915_drv.h>
+#include <i915/src/i915_drm.h>
+#include <i915/src/intel_drv.h>
+
+
+/*
+ * Defines
+ */
+/* dcmd options */
+#define ACTIVE_LIST 0x01
+#define INACTIVE_LIST 0x02
+#define BOUND_LIST 0x04
+#define UNBOUND_LIST 0x08
+
+#define RENDER_RING 0x01
+#define BLT_RING 0x02
+#define BSD_RING 0x04
+
+u32 count, mappable_count, purgeable_count;
+size_t size, mappable_size, purgeable_size;
+
+/*
+ * Initialize the list_head walker with the given starting address, which
+ * must be the head of the list. We also allocate a struct list_head for
+ * storage, and save this using the walk_data pointer.
+ */
+static int
+head_list_walk_init(mdb_walk_state_t *wsp)
+{
+
+ if (wsp->walk_addr == NULL) {
+ mdb_warn("head is NULL");
+ return (WALK_ERR);
+ }
+
+ wsp->walk_data = mdb_alloc(sizeof (struct list_head), UM_SLEEP);
+
+ if (mdb_vread(wsp->walk_data, sizeof (struct list_head),
+ wsp->walk_addr) == -1) {
+ mdb_warn("failed to read list head at %p", wsp->walk_addr);
+ return (WALK_DONE);
+ }
+
+ wsp->walk_arg = (void *)wsp->walk_addr;
+
+ wsp->walk_addr =
+ (uintptr_t)(((struct list_head *)wsp->walk_data)->next);
+
+ return (WALK_NEXT);
+}
+
+/*
+ * At each step, read a list_head into our private storage, and then invoke
+ * the callback function. We terminate when we wrap back around to the list
+ * head we started from, or when we reach a NULL next pointer.
+ */
+static int
+head_list_walk_step(mdb_walk_state_t *wsp)
+{
+ int status;
+
+ if (wsp->walk_addr == NULL) {
+		mdb_warn("incomplete list");
+ return (WALK_DONE);
+ }
+
+ if (mdb_vread(wsp->walk_data, sizeof (struct list_head),
+ wsp->walk_addr) == -1) {
+ mdb_warn("failed to read list at %p", wsp->walk_addr);
+ return (WALK_DONE);
+ }
+
+ if ((void *)wsp->walk_addr == wsp->walk_arg) {
+ return (WALK_DONE);
+ }
+
+ status = wsp->walk_callback(wsp->walk_addr, wsp->walk_data,
+ wsp->walk_cbdata);
+
+ wsp->walk_addr =
+ (uintptr_t)(((struct list_head *)wsp->walk_data)->next);
+ return (status);
+}
+
+/*
+ * The walker's fini function is invoked at the end of each walk. Since we
+ * dynamically allocated a struct list_head in head_list_walk_init, we must
+ * free it now.
+ */
+static void
+head_list_walk_fini(mdb_walk_state_t *wsp)
+{
+	mdb_free(wsp->walk_data, sizeof (struct list_head));
+}
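+
+/*
+ * A sketch of how this walker is meant to be driven (hypothetical mdb
+ * session; "head_list" is the walker name used with mdb_pwalk_dcmd()
+ * throughout this module, and addr is the address of a struct list_head):
+ *
+ *   > <addr>::walk head_list
+ *   > <addr>::walk head_list | ::i915_obj_list_node
+ */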
+
+static int
+get_drm_dev(struct drm_device *drm_dev)
+{
+ uintptr_t i915_ss;
+ void *state;
+ i_ddi_soft_state *ss;
+ uintptr_t array;
+
+ state = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+
+ if (mdb_readsym(&i915_ss, sizeof (i915_ss),
+ "i915_statep") == -1) {
+ mdb_warn("failed to read i915_statep");
+ mdb_free(state, sizeof (struct drm_device));
+ return (-1);
+ }
+
+ if (mdb_vread(state, sizeof (struct drm_device), i915_ss) == -1) {
+ mdb_warn("Failed to read state\n");
+ mdb_free(state, sizeof (struct drm_device));
+ return (-1);
+ }
+
+ ss = (i_ddi_soft_state *) state;
+
+ if (mdb_vread(&array, sizeof (uintptr_t), (uintptr_t)ss->array) == -1) {
+ mdb_warn("Failed to read array\n");
+ mdb_free(state, sizeof (struct drm_device));
+ return (-1);
+ }
+
+ if (mdb_vread(drm_dev, sizeof (struct drm_device), array) == -1) {
+ mdb_warn("Failed to read drm_dev\n");
+ mdb_free(state, sizeof (struct drm_device));
+ return (-1);
+ }
+ mdb_free(state, sizeof (struct drm_device));
+
+ return (DCMD_OK);
+}
+
+static int
+get_i915_private(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *drm_dev;
+ int ret;
+
+ drm_dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(drm_dev);
+
+ if (ret == DCMD_OK) {
+ if (mdb_vread(dev_priv, sizeof (struct drm_i915_private),
+ (uintptr_t)drm_dev->dev_private) == -1) {
+ mdb_warn("Failed to read i915 private\n");
+ mdb_free(drm_dev, sizeof (struct drm_device));
+ return (-1);
+ }
+ }
+ mdb_free(drm_dev, sizeof (struct drm_device));
+ return (ret);
+}
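+
+/*
+ * Note on the two helpers above: they chase the i915 soft state down to
+ * the per-device private data, assuming driver instance 0:
+ *
+ *   i915_statep (i_ddi_soft_state) -> ss->array[0] (struct drm_device)
+ *       -> dev->dev_private (struct drm_i915_private)
+ */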
+
+void
+i915_pciid_help(void)
+{
+ mdb_printf("Print device ID information of Intel graphics card.\n");
+}
+
+/* ARGSUSED */
+static int
+i915_pciid(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ int ret;
+ struct drm_device *drm_dev;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ drm_dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+
+ ret = get_drm_dev(drm_dev);
+ if (ret == DCMD_OK)
+ mdb_printf(" vendor 0x%x device 0x%x\n",
+ drm_dev->pci_vendor, drm_dev->pci_device);
+
+ mdb_free(drm_dev, sizeof (struct drm_device));
+ return (ret);
+}
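+
+/*
+ * Hypothetical usage (the values below are examples only; 0x8086 is the
+ * Intel PCI vendor ID):
+ *
+ *   > ::i915_pciid
+ *    vendor 0x8086 device 0x166
+ */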
+
+void
+i915_gtt_total_help(void)
+{
+ mdb_printf("Print graphics translation table (GTT) size\n");
+}
+
+/* ARGSUSED */
+static int
+i915_gtt_total(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ int ret;
+ struct drm_i915_private *dev_priv;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+
+ ret = get_i915_private(dev_priv);
+ if (ret == DCMD_OK) {
+		mdb_printf("gtt total size 0x%x\n", dev_priv->gtt.total);
+		mdb_printf("gtt mappable size 0x%x\n",
+		    dev_priv->gtt.mappable_end);
+		mdb_printf("gtt stolen size 0x%x\n",
+		    dev_priv->gtt.stolen_size);
+ }
+
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+}
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
+{
+ if (obj->user_pin_count > 0)
+ return ("P");
+ else if (obj->pin_count > 0)
+ return ("p");
+ else
+ return (" ");
+}
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+{
+ switch (obj->tiling_mode) {
+ default:
+ case I915_TILING_NONE: return " ";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ }
+}
+
+static const char *cache_level_str(int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return " snooped (LLC)";
+ case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
+ default: return "";
+ }
+}
+
+static void
+describe_obj(struct drm_i915_gem_object *obj)
+{
+
+ mdb_printf("%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+ obj->base.size / 1024,
+ obj->base.read_domains,
+ obj->base.write_domain,
+ obj->last_read_seqno,
+ obj->last_write_seqno,
+ obj->last_fenced_seqno,
+ cache_level_str(obj->cache_level),
+ obj->dirty ? " dirty" : "",
+ obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ if (obj->base.name)
+ mdb_printf(" (name: %d)", obj->base.name);
+ if (obj->pin_count)
+ mdb_printf(" (pinned x %d)", obj->pin_count);
+ if (obj->fence_reg != I915_FENCE_REG_NONE)
+ mdb_printf(" (fence: %d)", obj->fence_reg);
+ if (obj->gtt_space != NULL)
+ mdb_printf(" (gtt offset: %08x, size: %08x)",
+ obj->gtt_offset, (unsigned int)obj->base.real_size);
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ mdb_printf(" (%s mappable)", s);
+ }
+
+}
+
+static void
+i915_obj_info(struct drm_i915_gem_object *obj)
+{
+
+ /* "size", "gtt_off", "kaddr", "pfn_array" */
+
+ mdb_printf(" 0x%-8x 0x%-8x 0x%lx 0x%lx\n",
+ obj->base.real_size, obj->gtt_offset, obj->base.kaddr,
+ obj->base.pfnarray);
+
+ size += obj->base.real_size;
+ ++count;
+ if (obj->map_and_fenceable) {
+ mappable_size += obj->base.real_size;
+ ++mappable_count;
+ }
+
+}
+
+void
+i915_obj_list_node_help(void)
+{
+ mdb_printf("Print objects information for a given list pointer\n"
+ "Fields printed:\n"
+ " obj:\tpointer to drm_i915_gem_object structure\n"
+ " size:\tobject size\n"
+ " gtt_off:\tobject offset in GTT (graphics address)\n"
+ " kaddr:\tobject kernel virtual address\n"
+	    " pfn_array:\tArray containing the object's PFNs\n");
+}
+
+/* ARGSUSED */
+static int
+i915_obj_list_node(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ int ret = DCMD_OK;
+ struct list_head list;
+ struct drm_i915_gem_object *obj;
+
+ if (!(flags & DCMD_ADDRSPEC))
+ return (DCMD_USAGE);
+
+	if (mdb_vread(&list, sizeof (struct list_head), addr) == -1) {
+ mdb_warn("failed to read list");
+ return (DCMD_ERR);
+ }
+
+ if (list.contain_ptr == 0) {
+ mdb_warn("no object!");
+ return (DCMD_ERR);
+ }
+
+ obj = mdb_alloc(sizeof (struct drm_i915_gem_object), UM_SLEEP);
+ if (mdb_vread(obj, sizeof (struct drm_i915_gem_object),
+ (uintptr_t)list.contain_ptr) == -1) {
+		mdb_warn("failed to read object info");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ mdb_printf("0x%lx ", list.contain_ptr);
+ i915_obj_info(obj);
+
+err:
+ mdb_free(obj, sizeof (struct drm_i915_gem_object));
+ return (ret);
+}
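+
+/*
+ * Hypothetical usage, where addr is a struct list_head node whose
+ * contain_ptr refers to a drm_i915_gem_object:
+ *
+ *   > <addr>::i915_obj_list_node
+ */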
+
+static int
+obj_walk_list(uintptr_t addr, const char *str)
+{
+ struct list_head *head;
+ int ret = DCMD_OK;
+
+ head = mdb_alloc(sizeof (struct list_head), UM_SLEEP);
+
+ if (mdb_vread(head, sizeof (struct list_head), addr) == -1) {
+		mdb_warn("failed to read %s list", str);
+ ret = DCMD_ERR;
+ goto err;
+ }
+ mdb_printf("Dump %s List\n", str);
+ mdb_printf("%s %20s %14s %9s %23s\n", "obj", "size", "gtt_off",
+ "kaddr", "pfn_array");
+
+ if (mdb_pwalk_dcmd("head_list", "i915_obj_list_node",
+ 0, NULL, (uintptr_t)head->prev) == -1) {
+ mdb_warn("failed to walk head_list");
+ ret = DCMD_ERR;
+ goto err;
+ }
+err:
+ mdb_free(head, sizeof (struct list_head));
+ return (ret);
+}
+
+void
+i915_obj_list_help(void)
+{
+ mdb_printf("Print object lists information\n"
+ "Fields printed:\n"
+ " obj:\tpointer to drm_i915_gem_object structure\n"
+ " size:\tobject size\n"
+ " gtt_off:\tobject offset in GTT (graphics address)\n"
+ " kaddr:\tobject kernel virtual address\n"
+	    " pfn_array:\tArray containing the object's PFNs\n"
+	    "Options:\n"
+	    " -a:\tprint only the active list\n"
+	    " -i:\tprint only the inactive list\n"
+	    " -b:\tprint only the bound list\n"
+	    " -u:\tprint only the unbound list\n"
+	    "\nWithout options, print information about all objects in the "
+	    "system.\n"
+	    "Please make sure mdb_track is enabled in the i915 driver by "
+	    "setting drm:mdb_track_enable=1\n"
+	    "before dumping all objects' information.\n");
+}
+
+/* ARGSUSED */
+static int
+i915_obj_list(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ uint_t list_flag = 0;
+ struct drm_i915_private *dev_priv;
+ int ret = DCMD_OK;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx, just ignore\n",
+ addr);
+ }
+
+ if (mdb_getopts(argc, argv,
+ 'a', MDB_OPT_SETBITS, ACTIVE_LIST, &list_flag,
+ 'i', MDB_OPT_SETBITS, INACTIVE_LIST, &list_flag,
+ 'b', MDB_OPT_SETBITS, BOUND_LIST, &list_flag,
+ 'u', MDB_OPT_SETBITS, UNBOUND_LIST, &list_flag, NULL) != argc) {
+ mdb_printf("\nUsage:\n"
+ "-a dump active_list\n"
+ "-i dump inactive_list\n"
+ "-b dump bound_list\n"
+ "-u dump unbound_list\n");
+ return (DCMD_USAGE);
+ }
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ mdb_printf("%u objects, 0x%lx bytes\n",
+ dev_priv->mm.object_count, dev_priv->mm.object_memory);
+
+ if (list_flag == 0) {
+ int mdb_track = 0;
+ if (mdb_readvar(&mdb_track, "mdb_track_enable") == -1) {
+ mdb_warn("failed to read mdb_track");
+ mdb_track = 0;
+ }
+
+ if (mdb_track == 0) {
+			mdb_printf("mdb_track is not enabled. Please enable "
+			    "it by setting drm:mdb_track_enable=1\n");
+ goto err;
+ }
+
+ /* dump whole gem objects list */
+ struct drm_device *drm_dev;
+ struct list_head *head;
+ drm_dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(drm_dev);
+ if (ret != DCMD_OK)
+ goto err2;
+
+ head = mdb_alloc(sizeof (struct list_head), UM_SLEEP);
+ if (mdb_vread(head, sizeof (struct list_head),
+ (uintptr_t)drm_dev->gem_objects_list.next) == -1) {
+ mdb_warn("failed to read whole gem list");
+ ret = DCMD_ERR;
+ goto err1;
+ }
+
+ mdb_printf("Dump %s List\n", "Whole gem objects");
+ mdb_printf("%s %20s %14s %9s %23s\n", "obj", "size",
+ "gtt_off", "kaddr", "pfn_array");
+
+ if (mdb_pwalk_dcmd("head_list", "i915_obj_list_node",
+ 0, NULL, (uintptr_t)head->prev) == -1) {
+ mdb_warn("failed to walk head_list");
+ ret = DCMD_ERR;
+			goto err1;
+ }
+err1:
+ mdb_free(head, sizeof (struct list_head));
+err2:
+ mdb_free(drm_dev, sizeof (struct drm_device));
+ goto err;
+ }
+ if (list_flag & ACTIVE_LIST) {
+ size = count = mappable_size = mappable_count = 0;
+ ret = obj_walk_list((uintptr_t)dev_priv->mm.active_list.next,
+		    "Active");
+ if (ret != DCMD_OK)
+ goto err;
+ mdb_printf(" %u [%u] active objects, 0x%lx [0x%lx] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ }
+
+ if (list_flag & INACTIVE_LIST) {
+ size = count = mappable_size = mappable_count = 0;
+ ret = obj_walk_list((uintptr_t)dev_priv->mm.inactive_list.next,
+		    "Inactive");
+ if (ret != DCMD_OK)
+ goto err;
+ mdb_printf(" %u [%u] inactive objects, 0x%lx [0x%lx] bytes\n",
+ count, mappable_count, size, mappable_size);
+ }
+
+ if (list_flag & BOUND_LIST) {
+ size = count = mappable_size = mappable_count = 0;
+ ret = obj_walk_list((uintptr_t)dev_priv->mm.bound_list.next,
+ "Bound");
+ if (ret != DCMD_OK)
+ goto err;
+ mdb_printf("%u [%u] objects, 0x%lx [0x%lx] bytes in gtt\n",
+ count, mappable_count, size, mappable_size);
+
+ }
+
+ if (list_flag & UNBOUND_LIST) {
+ size = count = purgeable_size = purgeable_count = 0;
+ ret = obj_walk_list((uintptr_t)dev_priv->mm.unbound_list.next,
+ "Unbound");
+ if (ret != DCMD_OK)
+ goto err;
+ mdb_printf("%u unbound objects, 0x%lx bytes\n", count, size);
+ }
+
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+}
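+
+/*
+ * Hypothetical usage (options may be combined; with no options the whole
+ * gem objects list is dumped, which requires drm:mdb_track_enable=1):
+ *
+ *   > ::i915_obj_list -a        dump the active list only
+ *   > ::i915_obj_list -a -i    dump the active and inactive lists
+ *   > ::i915_obj_list          dump every gem object in the system
+ */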
+
+void
+i915_ringbuffer_info_help(void)
+{
+ mdb_printf("Print ring object information\n"
+ "Fields printed:\n"
+ " mmio_base:\tMMIO base address\n"
+ " obj:\tpointer to ring object's drm_i915_gem_object structure\n"
+ "Options:\n"
+	    " -l:\tprint only BLT ring information\n"
+	    " -r:\tprint only RENDER ring information\n"
+	    " -s:\tprint only BSD ring information\n"
+	    "Without options, print information about all rings.\n");
+}
+
+/* ARGSUSED */
+static int
+i915_ringbuffer_info(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_i915_private *dev_priv;
+ uint_t ring_flag = 0;
+ int ret = DCMD_OK;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx, just ignore\n",
+ addr);
+ }
+
+ if (mdb_getopts(argc, argv,
+ 'l', MDB_OPT_SETBITS, BLT_RING, &ring_flag,
+ 'r', MDB_OPT_SETBITS, RENDER_RING, &ring_flag,
+ 's', MDB_OPT_SETBITS, BSD_RING, &ring_flag, NULL) != argc) {
+ mdb_printf("\nUsage:\n"
+ "-l blt ring information\n"
+ "-r render ring information\n"
+ "-s bsd ring information\n");
+ return (DCMD_USAGE);
+ }
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ if (ring_flag == 0)
+ ring_flag = 0xff;
+
+ if (ring_flag & RENDER_RING) {
+ mdb_printf("Render ring mmio_base 0x%lx obj 0x%lx\n",
+ dev_priv->ring[0].mmio_base,
+ dev_priv->ring[0].obj);
+ }
+
+ if (ring_flag & BLT_RING) {
+ mdb_printf("BLT ring mmio_base 0x%lx obj 0x%lx\n",
+ dev_priv->ring[2].mmio_base,
+ dev_priv->ring[2].obj);
+ }
+
+ if (ring_flag & BSD_RING) {
+ mdb_printf("BSD ring mmio_base 0x%lx obj 0x%lx\n",
+ dev_priv->ring[1].mmio_base,
+ dev_priv->ring[1].obj);
+ }
+
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+}
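+
+/*
+ * Hypothetical usage:
+ *
+ *   > ::i915_ringbuffer_info -r
+ *   Render ring mmio_base 0x... obj 0x...
+ */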
+
+void
+i915_gtt_dump_help(void)
+{
+ mdb_printf("Print the address of gtt_dump\n"
+	    "\ngtt_dump is a snapshot of the whole GTT table taken when a "
+	    "GPU hang happened.\n"
+	    "Please make sure gpu_dump is enabled in the i915 driver before "
+	    "using it.\n"
+ "\nSee also: \"::help i915_error_reg_dump\"\n");
+}
+
+/* ARGSUSED */
+static int
+i915_gtt_dump(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct drm_device *drm_dev;
+ int ret = DCMD_OK;
+
+ drm_dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(drm_dev);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ if (drm_dev->gtt_dump != 0)
+ mdb_printf("gtt_dump address 0x%lx\n", drm_dev->gtt_dump);
+ else
+		mdb_printf("There is no gtt_dump\n");
+
+err:
+ mdb_free(drm_dev, sizeof (struct drm_device));
+ return (ret);
+}
+
+static uint32_t
+i915_read(struct drm_i915_private *dev_priv, uintptr_t addr, uint32_t *val)
+{
+ struct drm_local_map regs;
+ int ret = DCMD_OK;
+
+ if (mdb_vread(&regs, sizeof (struct drm_local_map),
+ (intptr_t)dev_priv->regs) == -1) {
+ mdb_warn("Failed to read dev_priv->regs\n");
+ ret = DCMD_ERR;
+ return (ret);
+ }
+
+ if (mdb_pread(val, sizeof (uint32_t),
+ (intptr_t)regs.offset + addr) == -1) {
+ mdb_warn("Failed to read register 0x%x\n", addr);
+ ret = DCMD_ERR;
+ return (ret);
+ }
+
+ return (ret);
+
+}
+
+void
+i915_register_read_help(void)
+{
+ mdb_printf("Print register value for a given register offset\n");
+}
+
+/* ARGSUSED */
+static int
+i915_register_read(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = DCMD_OK;
+ uint32_t val;
+
+ if (!(flags & DCMD_ADDRSPEC)) {
+ return (DCMD_USAGE);
+ }
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+ ret = i915_read(dev_priv, addr, &val);
+ if (ret == DCMD_ERR) {
+ goto err;
+ }
+
+ mdb_printf("Register [0x%x]: 0x%x\n", addr, val);
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+}
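+
+/*
+ * Hypothetical usage, with the register offset passed as the dcmd address:
+ *
+ *   > 0x2030::i915_register_read
+ *   Register [0x2030]: 0x...
+ */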
+
+void
+i915_error_reg_dump_help(void)
+{
+ mdb_printf("Print debug register information\n"
+ "Registers printed:\n"
+ " PGTBL_ER:\tPage Table Errors Register\n"
+ " INSTPM:\tCommand Parser Mode Register\n"
+ " EIR:\tError Identity Register\n"
+ " EHR:\tError Header Register\n"
+ " INSTDONE:\tInstruction Stream Interface Done Register\n"
+ " INSTDONE1:\tInstruction Stream Interface Done 1\n"
+ " INSTPS:\tInstruction Parser State Register\n"
+ " ACTHD:\tActive Head Pointer Register\n"
+ " DMA_FADD_P:\tPrimary DMA Engine Fetch Address Register\n"
+	    "\ngtt_dump is a snapshot of the whole GTT table taken when a "
+	    "GPU hang happened.\n"
+ "\nSee also: \"::help i915_gtt_dump\"\n");
+}
+
+/* ARGSUSED */
+static int
+i915_error_reg_dump(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ int ret = DCMD_OK;
+ uint32_t val;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(dev);
+ if (ret == DCMD_ERR)
+ goto err1;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err2;
+ }
+
+ ret = i915_read(dev_priv, (uintptr_t)PGTBL_ER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("PGTBL_ER: 0x%lx\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)INSTPM, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("INSTPM: 0x%lx\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)EIR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("EIR: 0x%lx\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)ERROR_GEN6, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("ERROR_GEN6: 0x%lx\n", val);
+
+ mdb_printf("\nBlitter command stream:\n");
+ ret = i915_read(dev_priv, (uintptr_t)0x22064, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" BLT EIR: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)0x22068, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" BLT EHR: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)0x2206C, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" INSTDONE: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)0x22074, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" ACTHD: 0x%08x\n", val);
+
+
+ mdb_printf("\nRender command stream:\n");
+ ret = i915_read(dev_priv, (uintptr_t)IPEIR_I965, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" RENDER EIR: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)IPEHR_I965, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" RENDER EHR: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)INSTDONE_I965, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" INSTDONE: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)INSTPS, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" INSTPS: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)INSTDONE1, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" INSTDONE1: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)ACTHD_I965, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" ACTHD: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)0x2078, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(" DMA_FADD_P: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)0x04094, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("\nGraphics Engine Fault 0x%lx\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)0x04194, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Media Engine Fault 0x%lx\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)0x04294, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Blitter Engine Fault 0x%lx\n", val);
+
+ if (dev->gtt_dump != NULL)
+ mdb_printf("gtt dump to %p\n", dev->gtt_dump);
+ else
+ mdb_printf("no gtt dump\n");
+err2:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+err1:
+ mdb_free(dev, sizeof (struct drm_device));
+
+ return (ret);
+
+}
+
+void
+i915_obj_history_help(void)
+{
+ mdb_printf("Print object history information for a given "
+ "drm_i915_gem_object pointer\n"
+ "Fields printed:\n"
+ " event:\tOperation name\n"
+ " cur_seq:\tCurrent system SeqNO\n"
+	    " last_seq:\tLast SeqNO that has been processed\n"
+	    " ring addr:\tPointer to the intel_ring_buffer structure\n"
+ "\nNOTE: Please make sure mdb_track is enabled in i915 driver "
+ "by setting mdb_track_enable\n");
+}
+
+/* ARGSUSED */
+static int
+i915_obj_history(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct drm_gem_object obj;
+ struct list_head node;
+ uintptr_t temp;
+ struct drm_history_list history_node;
+ int ret = DCMD_OK;
+ int condition = 1;
+ int mdb_track = 0;
+
+ if (!(flags & DCMD_ADDRSPEC)) {
+ return (DCMD_USAGE);
+ }
+
+ if (mdb_vread(&obj, sizeof (struct drm_gem_object), addr) == -1) {
+		mdb_warn("failed to read gem object info");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ if (mdb_readvar(&mdb_track, "mdb_track_enable") == -1) {
+ mdb_warn("failed to read mdb_track");
+ mdb_track = 0;
+ }
+
+ if (mdb_track == 0) {
+		mdb_printf("mdb_track is not enabled. Please enable it "
+		    "by setting drm:mdb_track_enable=1\n");
+ goto err;
+ }
+
+ mdb_printf("Dump obj history\n");
+ mdb_printf("%s %20s %10s %10s\n", "event", "cur_seq", "last_seq",
+ "ring addr");
+ temp = (uintptr_t)obj.his_list.next;
+ while (condition) {
+ if (mdb_vread(&node, sizeof (struct list_head), temp) == -1) {
+ mdb_warn("failed to read his_list node");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ if (node.contain_ptr == NULL)
+ break;
+
+ if (mdb_vread(&history_node, sizeof (struct drm_history_list),
+ (uintptr_t)node.contain_ptr) == -1) {
+ mdb_warn("failed to read history node");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ mdb_printf("%s %-8d %-8d 0x%lx\n", history_node.info,
+ history_node.cur_seq, history_node.last_seq,
+ history_node.ring_ptr);
+
+ temp = (uintptr_t)node.next;
+ }
+
+err:
+ return (ret);
+}
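+
+/*
+ * Hypothetical usage, where addr is a drm_gem_object address (requires
+ * drm:mdb_track_enable=1 in the i915 driver):
+ *
+ *   > <addr>::i915_obj_history
+ */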
+
+void
+i915_batch_history_help(void)
+{
+ mdb_printf("Print batchbuffer information\n"
+ "\nAll batchbuffers' information is printed one by one "
+ "from the latest one to the oldest one.\n"
+	    "The information includes the number of objects, the assigned "
+	    "SeqNO, and the object list.\n"
+ "\nNOTE: Please make sure mdb_track is enabled in i915 "
+ "driver by setting mdb_track_enable\n");
+
+}
+
+/* ARGSUSED */
+static int
+i915_batch_history(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct list_head node;
+ uintptr_t temp;
+ struct batch_info_list batch_node;
+ caddr_t *obj_list;
+ int ret, i;
+ struct drm_i915_private *dev_priv;
+ int condition = 1;
+ int mdb_track = 0;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+		mdb_warn("failed to read i915 private data");
+ goto err;
+ }
+
+ if (mdb_readvar(&mdb_track, "mdb_track_enable") == -1) {
+ mdb_warn("failed to read mdb_track");
+ mdb_track = 0;
+ }
+ if (mdb_track == 0) {
+		mdb_printf("mdb_track is not enabled. Please enable it by "
+		    "setting drm:mdb_track_enable=1\n");
+ goto err;
+ }
+
+ mdb_printf("Dump batchbuffer history\n");
+ temp = (uintptr_t)dev_priv->batch_list.next;
+ while (condition) {
+
+ if (mdb_vread(&node, sizeof (struct list_head), temp) == -1) {
+ mdb_warn("failed to read his_list node");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ if (node.contain_ptr == NULL)
+ break;
+
+ if (mdb_vread(&batch_node, sizeof (struct batch_info_list),
+ (uintptr_t)node.contain_ptr) == -1) {
+ mdb_warn("failed to read batch node");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ mdb_printf("batch buffer includes %d objects, seqno 0x%x\n",
+ batch_node.num, batch_node.seqno);
+
+ obj_list = mdb_alloc(batch_node.num * sizeof (caddr_t),
+ UM_SLEEP);
+		if (mdb_vread(obj_list, batch_node.num * sizeof (caddr_t),
+		    (uintptr_t)batch_node.obj_list) == -1) {
+			mdb_warn("failed to read batch object list");
+			mdb_free(obj_list, batch_node.num * sizeof (caddr_t));
+			ret = DCMD_ERR;
+			goto err;
+		}
+
+
+ for (i = 0; i < batch_node.num; i++) {
+ mdb_printf("obj: 0x%lx\n", obj_list[i]);
+ }
+
+ mdb_free(obj_list, batch_node.num * sizeof (caddr_t));
+
+ temp = (uintptr_t)node.next;
+ }
+
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+}
+
+static const char *yesno(int v)
+{
+ return (v ? "yes" : "no");
+}
+
+void
+i915_capabilities_help(void)
+{
+ mdb_printf("Print capability information for Intel graphics card\n"
+ "Fields printed:\n"
+ " is_mobile:\tMobile Platform\n"
+ " is_i85x:\tIntel I85x series graphics card\n"
+ " is_i915g:\tIntel I915g series graphics card\n"
+ " is_i945gm:\tIntel I945gm series graphics card\n"
+ " is_g33:\tIntel G33 series graphics card\n"
+ " need_gfx_hws:\tNeed setup graphics hardware status page\n"
+ " is_g4x:\tIntel G4x series graphics card\n"
+ " is_pineview:\tIntel Pineview series graphics card\n"
+ " is_broadwater:\tIntel Broadwater series graphics card\n"
+ " is_crestline:\tIntel Crestline series graphics card\n"
+ " is_ivybridge:\tIntel Ivybridge series graphics card\n"
+ " is_valleyview:\tIntel Valleyview series graphics card\n"
+ " has_force_wake:\tHas force awake\n"
+ " is_haswell:\tIntel Haswell series graphics card\n"
+ " has_fbc:\tHas Framebuffer compression\n"
+ " has_pipe_cxsr:\tHas CxSR\n"
+ " has_hotplug:\tHas output hotplug\n"
+ " cursor_needs_physical:\tHas physical cursor object\n"
+ " has_overlay:\tHas Overlay\n"
+ " overlay_needs_physical:\tHas physical overlay object\n"
+ " supports_tv:\tSupport TV output\n"
+ " has_bsd_ring:\tHas BSD ring\n"
+ " has_blt_ring:\tHas BLT ring\n"
+ " has_llc:\tHas last level cache\n");
+}
+
+/* ARGSUSED */
+static int
+i915_capabilities(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ int ret;
+ struct drm_device *drm_dev;
+ struct drm_i915_private *dev_priv;
+ struct intel_device_info info;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ drm_dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+
+ ret = get_drm_dev(drm_dev);
+ if (ret == DCMD_ERR)
+ goto err1;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err2;
+ }
+
+ if (mdb_vread(&info, sizeof (struct intel_device_info),
+ (uintptr_t)dev_priv->info) == -1) {
+ mdb_warn("failed to read i915 chip info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ mdb_printf("gen: %d\n", info.gen);
+ mdb_printf("pch: %d\n", dev_priv->pch_type);
+
+
+/* BEGIN CSTYLED */
+#define SEP_SEMICOLON ;
+#define DEFINE_FLAG(x) mdb_printf(#x ": %s\n", yesno(info.x))
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+#undef DEFINE_FLAG
+#undef SEP_SEMICOLON
+/* END CSTYLED */
+
+err2:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+err1:
+ mdb_free(drm_dev, sizeof (struct drm_device));
+ return (ret);
+}
+
+void
+i915_request_list_node_help(void)
+{
+ mdb_printf("Print request list information for a given list pointer\n"
+ "The information includes request, SeqNO. and timestamp.\n");
+}
+
+/* ARGSUSED */
+static int
+i915_request_list_node(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ int ret = DCMD_OK;
+ struct list_head list;
+ struct drm_i915_gem_request *request;
+
+ if (!(flags & DCMD_ADDRSPEC))
+ return (DCMD_USAGE);
+
+	if (mdb_vread(&list, sizeof (struct list_head), addr) == -1) {
+ mdb_warn("failed to read list");
+ return (DCMD_ERR);
+ }
+
+ if (list.contain_ptr == 0) {
+ mdb_warn("no request!");
+ return (DCMD_ERR);
+ }
+
+ request = mdb_alloc(sizeof (struct drm_i915_gem_request), UM_SLEEP);
+ if (mdb_vread(request, sizeof (struct drm_i915_gem_request),
+ (uintptr_t)list.contain_ptr) == -1) {
+		mdb_warn("failed to read request info");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ mdb_printf("0x%lx ", list.contain_ptr);
+ mdb_printf(" %d @ %ld\n", request->seqno, request->emitted_jiffies);
+
+err:
+ mdb_free(request, sizeof (struct drm_i915_gem_request));
+ return (ret);
+}
+
+static int
+request_walk_list(uintptr_t addr, const char *str)
+{
+ struct list_head *head;
+ int ret = DCMD_OK;
+
+ mdb_printf("Dump %s ring request List %p\n", str, addr);
+
+ head = mdb_alloc(sizeof (struct list_head), UM_SLEEP);
+ if (mdb_vread(head, sizeof (struct list_head), addr) == -1) {
+		mdb_warn("failed to read %s ring request list", str);
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ if (mdb_pwalk_dcmd("head_list", "i915_request_list_node",
+ 0, NULL, (uintptr_t)head->prev) == -1) {
+ mdb_warn("failed to walk request head_list");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+err:
+ mdb_free(head, sizeof (struct list_head));
+ return (ret);
+
+}
+
+void
+i915_gem_request_info_help(void)
+{
+ mdb_printf("Print request list for each ring buffer\n"
+ "\nSee also: \"::help i915_request_list_node\"\n");
+}
+
+/* ARGSUSED */
+static int
+i915_gem_request_info(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = DCMD_OK;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx, just ignore\n",
+ addr);
+ }
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ ret = request_walk_list((uintptr_t)dev_priv->ring[0].request_list.next,
+ "RENDER");
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+ ret = request_walk_list((uintptr_t)dev_priv->ring[1].request_list.next,
+ "BSD");
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+ ret = request_walk_list((uintptr_t)dev_priv->ring[2].request_list.next,
+ "BLT");
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+}
+
+static int
+get_hws_info(uintptr_t addr, const char *str, int index)
+{
+ u32 *regs;
+ int i, ret = DCMD_OK;
+
+ regs = mdb_alloc(0x4000, UM_SLEEP);
+ if (mdb_vread(regs, 0x4000,
+ addr) == -1) {
+ mdb_warn("failed to read hardware status page");
+ ret = DCMD_ERR;
+ goto err;
+ }
+
+ if (index < 0) {
+ for (i = 0; i < 4096 / sizeof (u32) / 4; i += 4) {
+ mdb_printf("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i * 4, regs[i], regs[i + 1], regs[i + 2],
+ regs[i + 3]);
+ }
+	} else {
+		mdb_printf("%s ring seqno %d\n", str, regs[index]);
+	}
+err:
+	mdb_free(regs, 0x4000);
+	return (ret);
+
+}
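+
+/*
+ * Note on get_hws_info() above: a negative index dumps the beginning of the
+ * hardware status page as rows of four u32 values, while a non-negative
+ * index (e.g. I915_GEM_HWS_INDEX) prints only the seqno slot for one ring.
+ */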
+
+void
+i915_ring_seqno_info_help(void)
+{
+ mdb_printf("Print current SeqNO. for each ring buffer\n");
+}
+
+/* ARGSUSED */
+static int
+i915_ring_seqno_info(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = DCMD_OK;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ ret = get_hws_info((uintptr_t)dev_priv->ring[0].status_page.page_addr,
+ "RENDER", I915_GEM_HWS_INDEX);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+ ret = get_hws_info((uintptr_t)dev_priv->ring[1].status_page.page_addr,
+ "BSD", I915_GEM_HWS_INDEX);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+ ret = get_hws_info((uintptr_t)dev_priv->ring[2].status_page.page_addr,
+ "BLT", I915_GEM_HWS_INDEX);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+
+}
+
+void
+i915_gem_fence_regs_info_help(void)
+{
+ mdb_printf("Print current Fence registers information\n"
+ "The information includes object address, user pin flag, tiling mode,\n"
+ "size, read domain, write domain, read SeqNO, write SeqNO, fence "
+	    "SeqNO,\n"
+	    "cache level, dirty info, object name, pin count, fence reg "
+	    "number,\n"
+ "GTT offset, pin mappable and fault mappable.\n");
+}
+
+/* ARGSUSED */
+static int
+i915_gem_fence_regs_info(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_i915_private *dev_priv;
+ int i, ret = DCMD_OK;
+ struct drm_i915_gem_object obj;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ mdb_printf("Reserved fences start = %d\n", dev_priv->fence_reg_start);
+ mdb_printf("Total fences = %d\n", dev_priv->num_fence_regs);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+
+ mdb_printf("Fence %d, pin count = %d, object = ",
+ i, dev_priv->fence_regs[i].pin_count);
+
+ if (dev_priv->fence_regs[i].obj != NULL) {
+ mdb_vread(&obj, sizeof (struct drm_i915_gem_object),
+ (uintptr_t)dev_priv->fence_regs[i].obj);
+ describe_obj(&obj);
+ } else {
+ mdb_printf("unused");
+ }
+ mdb_printf("\n");
+ }
+
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+
+}
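+
+/*
+ * Hypothetical usage:
+ *
+ *   > ::i915_gem_fence_regs_info
+ *   Reserved fences start = ...
+ *   Total fences = ...
+ *   Fence 0, pin count = 0, object = unused
+ *   ...
+ */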
+
+void
+i915_interrupt_info_help(void)
+{
+ mdb_printf("Print debug register information\n"
+ "Registers printed:\n"
+ " IER:\tInterrupt Enable Register\n"
+ " IIR:\tInterrupt Identity Register\n"
+ " IMR:\tInterrupt Mask Register\n"
+ " ISR:\tInterrupt Status Register\n");
+}
+
+/* ARGSUSED */
+static int
+i915_interrupt_info(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct intel_device_info info;
+ int pipe, ret = DCMD_OK;
+ uint32_t val;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(dev);
+ if (ret == DCMD_ERR)
+ goto err1;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err2;
+ }
+
+ if (mdb_vread(&info, sizeof (struct intel_device_info),
+ (uintptr_t)dev_priv->info) == -1) {
+ mdb_warn("failed to read i915 chip info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ if (info.is_valleyview) {
+ ret = i915_read(dev_priv, (uintptr_t)VLV_IER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Display IER:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)VLV_IIR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Display IIR:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)VLV_IIR_RW, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Display IIR_RW:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)VLV_IMR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Display IMR:\t%08x\n", val);
+
+ for_each_pipe(pipe) {
+ ret = i915_read(dev_priv, PIPESTAT(pipe), &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Pipe %c stat:\t%08x\n",
+ pipe_name(pipe), val);
+ }
+ ret = i915_read(dev_priv, (uintptr_t)VLV_MASTER_IER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Master IER:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GTIER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Render IER:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GTIIR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Render IIR:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GTIMR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Render IMR:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GEN6_PMIER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("PM IER:\t\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GEN6_PMIIR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("PM IIR:\t\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GEN6_PMIMR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("PM IMR:\t\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)PORT_HOTPLUG_EN, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Port hotplug:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)VLV_DPFLIPSTAT, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("DPFLIPSTAT:\t%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)DPINVGTT, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("DPINVGTT:\t%08x\n", val);
+ } else if (dev_priv->pch_type == PCH_NONE) {
+ ret = i915_read(dev_priv, (uintptr_t)IER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Interrupt enable: %08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)IIR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Interrupt identity: %08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)IMR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Interrupt mask: %08x\n", val);
+
+ for_each_pipe(pipe) {
+ ret = i915_read(dev_priv, (uintptr_t)PIPESTAT(pipe),
+ &val);
+ if (ret == DCMD_OK)
+ mdb_printf("Pipe %c stat: %08x\n",
+ pipe_name(pipe), val);
+ }
+ } else {
+ ret = i915_read(dev_priv, (uintptr_t)DEIER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "North Display Interrupt enable: %08x\n",
+ val);
+
+ ret = i915_read(dev_priv, (uintptr_t)DEIIR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "North Display Interrupt identity: %08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)DEIMR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "North Display Interrupt mask: %08x\n",
+ val);
+
+ ret = i915_read(dev_priv, (uintptr_t)SDEIER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "South Display Interrupt enable: %08x\n",
+ val);
+
+ ret = i915_read(dev_priv, (uintptr_t)SDEIIR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "South Display Interrupt identity: %08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)SDEIMR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "South Display Interrupt mask: %08x\n",
+ val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GTIER, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "Graphics Interrupt enable: %08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GTIIR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "Graphics Interrupt identity: %08x\n",
+ val);
+
+ ret = i915_read(dev_priv, (uintptr_t)GTIMR, &val);
+ if (ret == DCMD_OK)
+ mdb_printf(
+ "Graphics Interrupt mask: %08x\n", val);
+ }
+ mdb_printf("Interrupts received: %d\n", dev_priv->irq_received);
+
+err2:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+err1:
+ mdb_free(dev, sizeof (struct drm_device));
+
+ return (ret);
+
+}
+
+void
+i915_hws_info_help(void)
+{
+ mdb_printf("Print hardware status page for each RING\n");
+}
+
+/* ARGSUSED */
+static int
+i915_hws_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = DCMD_OK;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ mdb_printf("Hardware status page for %s\n", "RENDER");
+ ret = get_hws_info((uintptr_t)dev_priv->ring[0].status_page.page_addr,
+ "RENDER", -1);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ mdb_printf("Hardware status page for %s\n", "BSD");
+ ret = get_hws_info((uintptr_t)dev_priv->ring[1].status_page.page_addr,
+ "BSD", -1);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ mdb_printf("Hardware status page for %s\n", "BLT");
+ ret = get_hws_info((uintptr_t)dev_priv->ring[2].status_page.page_addr,
+ "BLT", -1);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+ return (ret);
+
+}
+
+void
+i915_fbc_status_help(void)
+{
+	mdb_printf("Print the Framebuffer Compression (FBC) status and, "
+	    "if disabled, the reason\n");
+}
+
+/* ARGSUSED */
+static int
+i915_fbc_status(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct intel_device_info info;
+ int ret = DCMD_OK;
+	uint32_t val = 0;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(dev);
+ if (ret == DCMD_ERR)
+ goto err1;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err2;
+ }
+
+ if (mdb_vread(&info, sizeof (struct intel_device_info),
+ (uintptr_t)dev_priv->info) == -1) {
+ mdb_warn("failed to read i915 chip info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ if (!info.has_fbc) {
+ mdb_printf("FBC unsupported on this chipset\n");
+ goto err2;
+ }
+
+ if (dev_priv->pch_type != PCH_NONE) {
+ ret = i915_read(dev_priv, (uintptr_t)ILK_DPFC_CONTROL, &val);
+ if (ret == DCMD_OK) {
+ val &= DPFC_CTL_EN;
+ } else {
+ mdb_printf("Failed to read FBC register\n");
+ goto err2;
+ }
+ } else if (IS_GM45(dev)) {
+ ret = i915_read(dev_priv, (uintptr_t)DPFC_CONTROL, &val);
+ if (ret == DCMD_OK) {
+ val &= DPFC_CTL_EN;
+ } else {
+ mdb_printf("Failed to read FBC register\n");
+ goto err2;
+ }
+ } else if (info.is_crestline) {
+ ret = i915_read(dev_priv, (uintptr_t)FBC_CONTROL, &val);
+ if (ret == DCMD_OK) {
+ val &= FBC_CTL_EN;
+ } else {
+ mdb_printf("Failed to read FBC register\n");
+ goto err2;
+ }
+ }
+
+ if (val) {
+ mdb_printf("FBC enabled\n");
+ } else {
+ mdb_printf("FBC disabled: ");
+ switch (dev_priv->no_fbc_reason) {
+ case FBC_NO_OUTPUT:
+ mdb_printf("no outputs");
+ break;
+ case FBC_STOLEN_TOO_SMALL:
+ mdb_printf("not enough stolen memory");
+ break;
+ case FBC_UNSUPPORTED_MODE:
+ mdb_printf("mode not supported");
+ break;
+ case FBC_MODE_TOO_LARGE:
+ mdb_printf("mode too large");
+ break;
+ case FBC_BAD_PLANE:
+ mdb_printf("FBC unsupported on plane");
+ break;
+ case FBC_NOT_TILED:
+ mdb_printf("scanout buffer not tiled");
+ break;
+ case FBC_MULTIPLE_PIPES:
+ mdb_printf("multiple pipes are enabled");
+ break;
+ case FBC_MODULE_PARAM:
+ mdb_printf("disabled per module param (default off)");
+ break;
+ default:
+ mdb_printf("unknown reason");
+ }
+ mdb_printf("\n");
+ }
+
+err2:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+err1:
+ mdb_free(dev, sizeof (struct drm_device));
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+i915_sr_status(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct intel_device_info info;
+ int ret = DCMD_OK;
+ uint32_t val = 0;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(dev);
+ if (ret == DCMD_ERR)
+ goto err1;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err2;
+ }
+
+ if (mdb_vread(&info, sizeof (struct intel_device_info),
+ (uintptr_t)dev_priv->info) == -1) {
+ mdb_warn("failed to read i915 chip info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ if (dev_priv->pch_type != PCH_NONE) {
+ ret = i915_read(dev_priv, (uintptr_t)WM1_LP_ILK, &val);
+ if (ret == DCMD_OK) {
+ val &= WM1_LP_SR_EN;
+ } else {
+ mdb_printf("Failed to read sr register\n");
+ goto err2;
+ }
+ } else if (info.is_crestline || IS_I945G(dev) || info.is_i945gm) {
+ ret = i915_read(dev_priv, (uintptr_t)FW_BLC_SELF, &val);
+ if (ret == DCMD_OK) {
+ val &= FW_BLC_SELF_EN;
+ } else {
+ mdb_printf("Failed to read sr register\n");
+ goto err2;
+ }
+ } else if (IS_I915GM(dev)) {
+ ret = i915_read(dev_priv, (uintptr_t)INSTPM, &val);
+ if (ret == DCMD_OK) {
+ val &= INSTPM_SELF_EN;
+ } else {
+ mdb_printf("Failed to read sr register\n");
+ goto err2;
+ }
+ } else if (info.is_pineview) {
+ ret = i915_read(dev_priv, (uintptr_t)DSPFW3, &val);
+ if (ret == DCMD_OK) {
+ val &= PINEVIEW_SELF_REFRESH_EN;
+ } else {
+ mdb_printf("Failed to read sr register\n");
+ goto err2;
+ }
+ }
+
+ mdb_printf("self-refresh: %s\n",
+ val ? "enabled" : "disabled");
+
+err2:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+err1:
+ mdb_free(dev, sizeof (struct drm_device));
+
+ return (ret);
+}
+
+void
+i915_gem_framebuffer_info_help(void)
+{
+ mdb_printf("Print console framebuffer information\n"
+ "The information includes width, height, depth,\n"
+ "bits_per_pixel and object structure address\n");
+}
+
+/* ARGSUSED */
+static int
+i915_gem_framebuffer_info(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct intel_fbdev fbdev;
+ struct drm_framebuffer fb;
+ int ret = DCMD_OK;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(dev);
+ if (ret == DCMD_ERR)
+ goto err1;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err2;
+ }
+
+ if (mdb_vread(&fbdev, sizeof (struct intel_fbdev),
+ (uintptr_t)dev_priv->fbdev) == -1) {
+ mdb_warn("failed to read intel_fbdev info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ if (mdb_vread(&fb, sizeof (struct drm_framebuffer),
+ (uintptr_t)fbdev.helper.fb) == -1) {
+ mdb_warn("failed to read framebuffer info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ mdb_printf("fbcon size: %d x %d, depth %d, %d bpp, obj %p",
+ fb.width,
+ fb.height,
+ fb.depth,
+ fb.bits_per_pixel,
+ fb.base);
+ mdb_printf("\n");
+
+err2:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+err1:
+ mdb_free(dev, sizeof (struct drm_device));
+
+ return (ret);
+}
+
+/* ARGSUSED */
+static int
+i915_gen6_forcewake_count_info(uintptr_t addr, uint_t flags, int argc,
+ const mdb_arg_t *argv)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = DCMD_OK;
+ unsigned forcewake_count;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err;
+ }
+
+ forcewake_count = dev_priv->forcewake_count;
+
+ mdb_printf("forcewake count = %u\n", forcewake_count);
+
+err:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+
+ return (ret);
+}
+
+static const char *
+swizzle_string(unsigned swizzle)
+{
+ switch (swizzle) {
+ case I915_BIT_6_SWIZZLE_NONE:
+ return ("none");
+ case I915_BIT_6_SWIZZLE_9:
+ return ("bit9");
+ case I915_BIT_6_SWIZZLE_9_10:
+ return ("bit9/bit10");
+ case I915_BIT_6_SWIZZLE_9_11:
+ return ("bit9/bit11");
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ return ("bit9/bit10/bit11");
+ case I915_BIT_6_SWIZZLE_9_17:
+ return ("bit9/bit17");
+ case I915_BIT_6_SWIZZLE_9_10_17:
+ return ("bit9/bit10/bit17");
+ case I915_BIT_6_SWIZZLE_UNKNOWN:
+ return ("unkown");
+ }
+
+ return ("bug");
+}
+
+void
+i915_swizzle_info_help(void)
+{
+ mdb_printf("Print object swizzle information\n"
+ "Registers printed:\n"
+ " TILECTL:\tTile Control\n"
+ " ARB_MODE:\tArbiter Mode Control Register\n"
+ " DISP_ARB_CTL:\tDisplay Arbiter Control\n");
+}
+
+/* ARGSUSED */
+static int
+i915_swizzle_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct intel_device_info info;
+ int ret = DCMD_OK;
+ uint32_t val = 0;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(dev);
+ if (ret == DCMD_ERR)
+ goto err1;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err2;
+ }
+
+ if (mdb_vread(&info, sizeof (struct intel_device_info),
+ (uintptr_t)dev_priv->info) == -1) {
+ mdb_warn("failed to read i915 chip info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ mdb_printf("bit6 swizzle for X-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ mdb_printf("bit6 swizzle for Y-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+ if ((info.gen == 3) || (info.gen == 4)) {
+ ret = i915_read(dev_priv, (uintptr_t)DCC, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("DDC = 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)C0DRB3, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("C0DRB3 = 0x%04x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)C1DRB3, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("C1DRB3 = 0x%04x\n", val);
+
+ } else if ((info.gen == 6) || (info.gen == 7)) {
+ ret = i915_read(dev_priv, (uintptr_t)MAD_DIMM_C0, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("MAD_DIMM_C0 = 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)MAD_DIMM_C1, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("MAD_DIMM_C1 = 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)MAD_DIMM_C2, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("MAD_DIMM_C2 = 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)TILECTL, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("TILECTL = 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)ARB_MODE, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("ARB_MODE = 0x%08x\n", val);
+
+ ret = i915_read(dev_priv, (uintptr_t)DISP_ARB_CTL, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("DISP_ARB_CTL = 0x%08x\n", val);
+
+ }
+
+err2:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+err1:
+ mdb_free(dev, sizeof (struct drm_device));
+
+ return (ret);
+}
+
+void
+i915_ppgtt_info_help(void)
+{
+ mdb_printf("Print Per-Process GTT (PPGTT) information\n"
+ "Registers printed:\n"
+ " GFX_MODE:\tGraphics Mode Register\n"
+ " PP_DIR_BASE:\tPage Directory Base Register\n"
+ " ECOCHK:\tMain Graphic Arbiter Misc Register\n");
+}
+
+/* ARGSUSED */
+static int
+i915_ppgtt_info(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct intel_device_info info;
+ int i, ret = DCMD_OK;
+ uint32_t val = 0;
+
+ if (flags & DCMD_ADDRSPEC) {
+ mdb_printf("don't need to set address 0x%lx\n", addr);
+ return (DCMD_OK);
+ }
+
+ dev = mdb_alloc(sizeof (struct drm_device), UM_SLEEP);
+ ret = get_drm_dev(dev);
+ if (ret == DCMD_ERR)
+ goto err1;
+
+ dev_priv = mdb_alloc(sizeof (struct drm_i915_private), UM_SLEEP);
+ ret = get_i915_private(dev_priv);
+ if (ret != DCMD_OK) {
+ goto err2;
+ }
+
+ if (mdb_vread(&info, sizeof (struct intel_device_info),
+ (uintptr_t)dev_priv->info) == -1) {
+ mdb_warn("failed to read i915 chip info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ if (info.gen == 6) {
+ ret = i915_read(dev_priv, (uintptr_t)GFX_MODE, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("GFX_MODE: 0x%08x\n", val);
+ }
+
+	for (i = 0; i < 3; i++) {
+ mdb_printf("RING %d\n", i);
+ if (info.gen == 7) {
+ ret = i915_read(dev_priv,
+ (uintptr_t)(dev_priv->ring[i].mmio_base + 0x29c),
+ &val);
+ if (ret == DCMD_OK)
+ mdb_printf("GFX_MODE: 0x%08x\n", val);
+ }
+ ret = i915_read(dev_priv,
+ (uintptr_t)(dev_priv->ring[i].mmio_base + 0x228), &val);
+ if (ret == DCMD_OK)
+ mdb_printf("PP_DIR_BASE: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv,
+ (uintptr_t)(dev_priv->ring[i].mmio_base + 0x518), &val);
+ if (ret == DCMD_OK)
+ mdb_printf("PP_DIR_BASE_READ: 0x%08x\n", val);
+
+ ret = i915_read(dev_priv,
+ (uintptr_t)(dev_priv->ring[i].mmio_base + 0x220), &val);
+ if (ret == DCMD_OK)
+ mdb_printf("PP_DIR_DCLV: 0x%08x\n", val);
+ }
+
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt ppgtt;
+ if (mdb_vread(&ppgtt, sizeof (struct i915_hw_ppgtt),
+ (uintptr_t)dev_priv->mm.aliasing_ppgtt) == -1) {
+ mdb_warn("failed to read aliasing_ppgtt info");
+ ret = DCMD_ERR;
+ goto err2;
+ }
+
+ mdb_printf("aliasing PPGTT:\n");
+ mdb_printf("pd gtt offset: 0x%08x\n", ppgtt.pd_offset);
+ }
+
+ ret = i915_read(dev_priv, (uintptr_t)GAM_ECOCHK, &val);
+ if (ret == DCMD_OK)
+ mdb_printf("ECOCHK: 0x%08x\n", val);
+
+err2:
+ mdb_free(dev_priv, sizeof (struct drm_i915_private));
+err1:
+ mdb_free(dev, sizeof (struct drm_device));
+
+ return (ret);
+}
+
+/*
+ * MDB module linkage information:
+ *
+ * We declare a list of structures describing our dcmds, a list of structures
+ * describing our walkers, and a function named _mdb_init to return a pointer
+ * to our module information.
+ */
+
+static const mdb_dcmd_t dcmds[] = {
+ {
+ "i915_pciid",
+ "?",
+ "pciid information",
+ i915_pciid,
+ i915_pciid_help
+ },
+ {
+ "i915_gtt_total",
+ "?",
+ "get gtt total size",
+ i915_gtt_total,
+ i915_gtt_total_help
+ },
+ {
+ "i915_obj_list",
+ "[-aibu]",
+ "i915 objects list information",
+ i915_obj_list,
+ i915_obj_list_help
+ },
+ {
+ "i915_obj_list_node",
+ ":",
+ "i915 objects list node information",
+ i915_obj_list_node,
+ i915_obj_list_node_help
+ },
+ {
+ "i915_ringbuffer_info",
+ "[-lrs]",
+ "i915 ring buffer information",
+ i915_ringbuffer_info,
+ i915_ringbuffer_info_help
+ },
+ {
+ "i915_gtt_dump",
+ "?",
+ "dump gtt_dump_add address",
+ i915_gtt_dump,
+ i915_gtt_dump_help
+ },
+ {
+ "i915_register_read",
+ ":",
+ "read i915 register",
+ i915_register_read,
+ i915_register_read_help
+ },
+ {
+ "i915_error_reg_dump",
+ "?",
+ "i915 error registers dump",
+ i915_error_reg_dump,
+ i915_error_reg_dump_help
+ },
+ {
+ "i915_obj_history",
+ ":",
+ "i915 objects track information",
+ i915_obj_history,
+ i915_obj_history_help
+ },
+ {
+ "i915_batch_history",
+ "?",
+ "i915 objects track information",
+ i915_batch_history,
+ i915_batch_history_help
+ },
+ {
+ "i915_capabilities",
+ "?",
+ "i915 capabilities information",
+ i915_capabilities,
+ i915_capabilities_help
+ },
+ {
+ "i915_request_list_node",
+ ":",
+ "i915 request list node information",
+ i915_request_list_node,
+ i915_request_list_node_help
+ },
+ {
+ "i915_gem_request",
+ "?",
+ "i915 gem request information",
+ i915_gem_request_info,
+ i915_gem_request_info_help
+ },
+ {
+ "i915_ring_seqno",
+ "?",
+ "ring seqno information",
+ i915_ring_seqno_info,
+ i915_ring_seqno_info_help
+ },
+ {
+ "i915_gem_fence_regs",
+ "?",
+ "fence register information",
+ i915_gem_fence_regs_info,
+ i915_gem_fence_regs_info_help
+ },
+ {
+ "i915_interrupt",
+ "?",
+ "interrupt information",
+ i915_interrupt_info,
+ i915_interrupt_info_help
+ },
+ {
+ "i915_gem_hws",
+ "?",
+ "hardware status page",
+ i915_hws_info,
+ i915_hws_info_help
+ },
+ {
+ "i915_fbc_status",
+ "?",
+ "framebuffer compression information",
+ i915_fbc_status,
+ i915_fbc_status_help
+ },
+ {
+ "i915_sr_status",
+ "?",
+ "Print self-refresh status",
+ i915_sr_status,
+ NULL
+ },
+ {
+ "i915_gem_framebuffer",
+ "?",
+ "Print framebuffer information",
+ i915_gem_framebuffer_info,
+ i915_gem_framebuffer_info_help
+ },
+ {
+ "i915_gen6_forcewake_count",
+ "?",
+ "Print forcewake count",
+ i915_gen6_forcewake_count_info,
+ NULL
+ },
+ {
+ "i915_swizzle",
+ "?",
+ "Print swizzle information",
+ i915_swizzle_info,
+ i915_swizzle_info_help
+ },
+ {
+ "i915_ppgtt",
+ "?",
+ "Print ppgtt information",
+ i915_ppgtt_info,
+ i915_ppgtt_info_help
+ },
+
+ { NULL }
+};
+
+static const mdb_walker_t walkers[] = {
+ {
+ "head_list",
+ "walk head list",
+ head_list_walk_init,
+ head_list_walk_step,
+ head_list_walk_fini
+ },
+ { NULL }
+};
+
+static const mdb_modinfo_t modinfo = {
+ MDB_API_VERSION, dcmds, walkers
+};
+
+const mdb_modinfo_t *
+_mdb_init(void)
+{
+ return (&modinfo);
+}
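+
+/*
+ * Usage sketch (illustrative, not part of the imported source): the dcmds
+ * registered above are invoked from mdb(1) by name once the module is
+ * loaded; each usage string declares whether an address is optional ("?")
+ * or required (":"), e.g.:
+ *
+ *	> ::load i915
+ *	> ::i915_capabilities
+ *	> addr::i915_obj_list_node
+ */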
diff --git a/usr/src/pkg/manifests/driver-graphics-drm.p5m b/usr/src/pkg/manifests/driver-graphics-drm.p5m
new file mode 100644
index 0000000..5f011bb
--- /dev/null
+++ b/usr/src/pkg/manifests/driver-graphics-drm.p5m
@@ -0,0 +1,145 @@
+#
+# Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+#
+
+#
+# The default for payload-bearing actions in this package is to appear in the
+# global zone only. See the include file for greater detail, as well as
+# information about overriding the defaults.
+#
+<include global_zone_only_component>
+set name=pkg.fmri value=pkg:/driver/graphics/drm@$(OSPKGVERS)
+set name=pkg.summary value="DRM Kernel Drivers"
+set name=pkg.description \
+ value="Direct Rendering Manager kernel drivers and modules. Currently supports Intel(R) integrated graphics devices with hardware acceleration, video memory management, Graphics Execution Manager (GEM), and kernel modesetting (KMS)."
+set name=info.upstream value="Direct Rendering Infrastructure (DRI) project"
+set name=info.upstream-url value=http://dri.freedesktop.org/
+set name=variant.arch value=i386
+file path=kernel/drv/$(ARCH64)/i915 group=sys
+file path=kernel/kmdb/$(ARCH64)/i915 group=sys
+file path=kernel/misc/$(ARCH64)/drm group=sys
+file path=usr/lib/mdb/kvm/$(ARCH64)/i915.so group=sys
+file path=usr/share/man/man4d/i915.4d
+driver name=i915 perms="* 0644 root sys" \
+ alias=pci8086,42 \
+ alias=pci8086,46 \
+ alias=pci8086,102 \
+ alias=pci8086,106 \
+ alias=pci8086,10a \
+ alias=pci8086,112 \
+ alias=pci8086,116 \
+ alias=pci8086,122 \
+ alias=pci8086,126 \
+ alias=pci8086,152 \
+ alias=pci8086,156 \
+ alias=pci8086,15a \
+ alias=pci8086,162 \
+ alias=pci8086,166 \
+ alias=pci8086,16a \
+ alias=pci8086,402 \
+ alias=pci8086,406 \
+ alias=pci8086,40a \
+ alias=pci8086,40b \
+ alias=pci8086,40e \
+ alias=pci8086,412 \
+ alias=pci8086,416 \
+ alias=pci8086,41a \
+ alias=pci8086,41b \
+ alias=pci8086,41e \
+ alias=pci8086,422 \
+ alias=pci8086,426 \
+ alias=pci8086,42a \
+ alias=pci8086,42b \
+ alias=pci8086,42e \
+ alias=pci8086,a02 \
+ alias=pci8086,a06 \
+ alias=pci8086,a0a \
+ alias=pci8086,a0b \
+ alias=pci8086,a0e \
+ alias=pci8086,a12 \
+ alias=pci8086,a16 \
+ alias=pci8086,a1a \
+ alias=pci8086,a1b \
+ alias=pci8086,a1e \
+ alias=pci8086,a22 \
+ alias=pci8086,a26 \
+ alias=pci8086,a2a \
+ alias=pci8086,a2b \
+ alias=pci8086,a2e \
+ alias=pci8086,c02 \
+ alias=pci8086,c06 \
+ alias=pci8086,c0a \
+ alias=pci8086,c0b \
+ alias=pci8086,c0e \
+ alias=pci8086,c12 \
+ alias=pci8086,c16 \
+ alias=pci8086,c1a \
+ alias=pci8086,c1b \
+ alias=pci8086,c1e \
+ alias=pci8086,c22 \
+ alias=pci8086,c26 \
+ alias=pci8086,c2a \
+ alias=pci8086,c2b \
+ alias=pci8086,c2e \
+ alias=pci8086,d02 \
+ alias=pci8086,d06 \
+ alias=pci8086,d0a \
+ alias=pci8086,d0b \
+ alias=pci8086,d0e \
+ alias=pci8086,d12 \
+ alias=pci8086,d16 \
+ alias=pci8086,d1a \
+ alias=pci8086,d1b \
+ alias=pci8086,d1e \
+ alias=pci8086,d22 \
+ alias=pci8086,d26 \
+ alias=pci8086,d2a \
+ alias=pci8086,d2b \
+ alias=pci8086,d2e \
+ alias=pci8086,2562 \
+ alias=pci8086,2572 \
+ alias=pci8086,2582 \
+ alias=pci8086,2592 \
+ alias=pci8086,2772 \
+ alias=pci8086,27a2 \
+ alias=pci8086,27ae \
+ alias=pci8086,2972 \
+ alias=pci8086,2982 \
+ alias=pci8086,2992 \
+ alias=pci8086,29a2 \
+ alias=pci8086,29b2 \
+ alias=pci8086,29c2 \
+ alias=pci8086,29d2 \
+ alias=pci8086,2a02 \
+ alias=pci8086,2a12 \
+ alias=pci8086,2a42 \
+ alias=pci8086,2e02.8086.2e02 \
+ alias=pci8086,2e12 \
+ alias=pci8086,2e22 \
+ alias=pci8086,2e32 \
+ alias=pci8086,2e42 \
+ alias=pci8086,3582 \
+ alias=pci8086,a001 \
+ alias=pci8086,a011
+legacy pkg=SUNWdrmr desc="Direct Rendering Manager kernel drivers and modules" \
+ name="DRM Kernel Drivers, (Root)"
diff --git a/usr/src/uts/common/drm/drm.h b/usr/src/uts/common/drm/drm.h
new file mode 100644
index 0000000..1e7c192
--- /dev/null
+++ b/usr/src/uts/common/drm/drm.h
@@ -0,0 +1,928 @@
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#if defined(__linux__)
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
+
+#else /* One of the BSDs or Solaris */
+
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+
+#if defined(__SOLARIS__) || defined(__sun)
+#include <sys/types32.h>
+typedef unsigned long long drm_handle_t;
+
+#else /* !__SOLARIS__ */
+typedef unsigned long drm_handle_t;
+
+#endif /* __SOLARIS__ || __sun */
+
+
+#endif /* __linux__ */
+/* Solaris-specific. */
+#if defined(__SOLARIS__) || defined(__sun)
+
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 13
+#define _IOC_DIRBITS 3
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT + _IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT + _IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT + _IOC_SIZEBITS)
+
+#define _IOC(dir, type, nr, size) \
+ (((dir) /* already shifted */) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+#define _IOC_TYPE(req)	(((req) >> _IOC_TYPESHIFT) & ((1 << _IOC_TYPEBITS) - 1))
+
+#endif /* __SOLARIS__ || __sun */
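+
+/*
+ * Illustrative note (not in the original header): this field layout mirrors
+ * <sys/ioccom.h>, so a request built with the DRM_IO* macros defined later
+ * in this file decomposes with these accessors, e.g.:
+ *
+ *	_IOC_TYPE(DRM_IOCTL_VERSION) == DRM_IOCTL_BASE == 'd'
+ */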
+
+#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
+
+typedef unsigned int drm_context_t;
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;
+
+/**
+ * Cliprect.
+ *
+ * \warning: If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+};
+
+/**
+ * Drawable information.
+ */
+struct drm_drawable_info {
+ unsigned int num_rects;
+ struct drm_clip_rect *rects;
+};
+
+/**
+ * Texture region.
+ */
+struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+};
+
+/**
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer. To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+struct drm_hw_lock {
+ __volatile__ unsigned int lock; /**< lock variable */
+ char padding[60]; /**< Pad to cache line */
+};
+
+/**
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+struct drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ size_t name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ size_t date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ size_t desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+};
+
+/**
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+struct drm_unique {
+ size_t unique_len; /**< Length of unique */
+ char *unique; /**< Unique name for driver instantiation */
+};
+
+struct drm_list {
+ int count; /**< Length of user-space structures */
+ struct drm_version *version;
+};
+
+struct drm_block {
+ int unused;
+};
+
+/**
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+};
+
+/**
+ * Type of memory to map.
+ */
+enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /**< no caching, no core dump */
+ _DRM_SHM = 2, /**< shared, cached */
+ _DRM_AGP = 3, /**< AGP/GART */
+ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
+ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
+ _DRM_GEM = 6 /**< GEM object */
+};
+
+/**
+ * Memory mapping flags.
+ */
+enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /**< shared, cached, locked */
+ _DRM_KERNEL = 0x08, /**< kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
+};
+
+struct drm_ctx_priv_map {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ void *handle; /**< Handle of map */
+};
+
+/**
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+struct drm_map {
+ unsigned long long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ unsigned long long handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+ /* Private data */
+};
+
+/**
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+struct drm_client {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ unsigned long pid; /**< Process ID */
+ unsigned long uid; /**< User ID */
+ unsigned long magic; /**< Magic */
+ unsigned long iocs; /**< Ioctl count */
+};
+
+enum drm_stat_type {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /**< Generic value */
+	_DRM_STAT_BYTE,	/**< Generic byte counter (1024 bytes/K) */
+ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /**< IRQ */
+ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
+ _DRM_STAT_DMA, /**< DMA */
+ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /**< Missed DMA opportunity */
+ /* Add to the *END* of the list */
+};
+
+/**
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ enum drm_stat_type type;
+ } data[15];
+};
+
+/**
+ * Hardware locking flags.
+ */
+enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+};
+
+/**
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+struct drm_lock {
+ int context;
+ enum drm_lock_flags flags;
+};
+
+/**
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+enum drm_dma_flags {
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note The buffer may not yet have
+ * been processed by the hardware --
+ * getting a hardware lock with the
+ * hardware quiescent will ensure
+ * that the buffer has been
+ * processed.
+ */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+};
+
+/**
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+
+typedef enum {
+ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
+ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
+ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+} drm_buf_flag;
+
+
+struct drm_buf_desc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ drm_buf_flag flags;
+ unsigned long agp_start; /**<
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+};
+
+/**
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+struct drm_buf_info {
+ int count; /**< Entries in list */
+ struct drm_buf_desc *list;
+};
+
+/**
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+struct drm_buf_free {
+ int count;
+ int *list;
+};
+
+/**
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+struct drm_buf_pub {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ void *address; /**< Address of buffer */
+};
+
+/**
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+struct drm_buf_map {
+ int count; /**< Length of the buffer list */
+#ifdef __cplusplus
+ void *virt;
+#else
+ void *virtual; /**< Mmap'd area in user-virtual */
+#endif
+ struct drm_buf_pub *list; /**< Buffer information */
+ int fd;
+};
+
+/**
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+struct drm_dma {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int *send_indices; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send */
+ enum drm_dma_flags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ int *request_indices; /**< Buffer information */
+ int *request_sizes;
+ int granted_count; /**< Number of buffers granted */
+};
+
+enum drm_ctx_flags {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+};
+
+/**
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+struct drm_ctx {
+ drm_context_t handle;
+ enum drm_ctx_flags flags;
+};
+
+/**
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+struct drm_ctx_res {
+ int count;
+ struct drm_ctx *contexts;
+};
+
+/**
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+struct drm_draw {
+ drm_drawable_t handle;
+};
+
+/**
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+ DRM_DRAWABLE_CLIPRECTS
+} drm_drawable_info_type_t;
+
+struct drm_update_draw {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+};
+
+/**
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+struct drm_auth {
+ drm_magic_t magic;
+};
+
+/**
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+struct drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+};
+
+enum drm_vblank_seq_type {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
+};
+#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
+#define _DRM_VBLANK_HIGH_CRTC_MASK 0x0000003e
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
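+
+/*
+ * Illustrative sketch (assumed from the shift/mask above): to wait on a
+ * CRTC other than the first, the CRTC index is folded into the type field:
+ *
+ *	vbl.request.type = _DRM_VBLANK_RELATIVE |
+ *	    ((crtc << _DRM_VBLANK_HIGH_CRTC_SHIFT) &
+ *	    _DRM_VBLANK_HIGH_CRTC_MASK);
+ */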
+
+struct drm_wait_vblank_request {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+#if defined(__sun)
+ time_t tval_sec;
+ suseconds_t tval_usec;
+#else
+ long tval_sec;
+ long tval_usec;
+#endif
+};
+
+/**
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+};
+
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+ __u32 crtc;
+ __u32 cmd;
+};
+
+/**
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+struct drm_agp_mode {
+ unsigned long mode; /**< AGP mode */
+};
+
+/**
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+struct drm_agp_buffer {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for binding / unbinding */
+ unsigned long type; /**< Type of memory to allocate */
+ unsigned long physical; /**< Physical used by i810 */
+};
+
+/**
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+struct drm_agp_binding {
+ unsigned long handle; /**< From drm_agp_buffer */
+ unsigned long offset; /**< In bytes -- will round to page boundary */
+};
+
+/**
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /* physical address */
+ unsigned long aperture_size; /* bytes */
+ unsigned long memory_allowed; /* bytes */
+ unsigned long memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+};
+
+/**
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+struct drm_scatter_gather {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for mapping / unmapping */
+};
+
+/**
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+};
+
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ __u32 handle;
+ __u32 pad;
+};
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ __u32 handle;
+
+ /** Returned global name */
+ __u32 name;
+};
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct drm_gem_open {
+ /** Name of object being opened */
+ __u32 name;
+
+ /** Returned handle for the object */
+ __u32 handle;
+
+ /** Returned size of the object */
+ __u64 size;
+};
+
+/*
+ * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that
+ * hardware-agnostic userspace can query a cursor plane size to use.
+ *
+ * Note that the cross-driver contract is merely to return a valid size;
+ * drivers are free to attach another meaning on top, e.g. i915 returns the
+ * maximum plane size.
+ */
+#define DRM_CAP_CURSOR_WIDTH 0x8
+#define DRM_CAP_CURSOR_HEIGHT 0x9
+
+/** DRM_IOCTL_GET_CAP ioctl argument type */
+struct drm_get_cap {
+ __u64 capability;
+ __u64 value;
+};
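+
+/*
+ * Usage sketch (illustrative): a capability is queried by filling in
+ * "capability" and reading back "value" through DRM_IOCTL_GET_CAP
+ * (defined below), e.g.:
+ *
+ *	struct drm_get_cap cap = { DRM_CAP_CURSOR_WIDTH, 0 };
+ *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
+ *		width = cap.value;
+ */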
+
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D 1
+
+/**
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES
+ *
+ * if set to 1, the DRM core will expose the full universal plane list
+ * (including primary and cursor planes).
+ */
+#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will allow atomic modesetting requests.
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+ __u64 capability;
+ __u64 value;
+};
+
+#define DRM_CLOEXEC O_CLOEXEC
+struct drm_prime_handle {
+ __u32 handle;
+
+ /** Flags.. only applicable for handle->fd */
+ __u32 flags;
+
+ /** Returned dmabuf file descriptor */
+ __s32 fd;
+};
+
+#include "drm_mode.h"
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE, (nr))
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE, (nr), type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE, (nr), type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE, (nr), type)
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
+#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+
+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
+#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
+#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
+
+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
+#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
+#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
+#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
+#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
+#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
+#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
+#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
+
+/**
+ * Device specific ioctls should only be in their respective headers
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
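+
+/*
+ * Illustrative example: driver headers build their ioctls relative to
+ * DRM_COMMAND_BASE; the i915 header imported alongside this file follows
+ * the pattern
+ *
+ *	#define DRM_IOCTL_I915_GETPARAM \
+ *	    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+ */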
+
+/**
+ * Header for events written back to userspace on the drm fd. The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event. A read on the drm fd will always only return complete
+ * events, that is, if for example the read buffer is 100 bytes, and
+ * there are two 64 byte events pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+ __u32 type;
+ __u32 length;
+};
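+
+/*
+ * Read-loop sketch (illustrative; handle_vblank() is a placeholder, and the
+ * event type and struct it uses are defined just below): events are consumed
+ * by walking the returned buffer record by record using each header's
+ * length field:
+ *
+ *	char buf[1024];
+ *	ssize_t n = read(fd, buf, sizeof (buf));
+ *	for (char *p = buf; p < buf + n; ) {
+ *		struct drm_event *e = (struct drm_event *)p;
+ *		if (e->type == DRM_EVENT_VBLANK)
+ *			handle_vblank((struct drm_event_vblank *)e);
+ *		p += e->length;
+ *	}
+ */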
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+ struct drm_event base;
+ __u64 user_data;
+#if defined(__sun)
+ time_t tv_sec;
+ suseconds_t tv_usec;
+#else
+ __u32 tv_sec;
+ __u32 tv_usec;
+#endif
+ __u32 sequence;
+ __u32 reserved;
+};
+
+#define DRM_CAP_DUMB_BUFFER 0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_PRIME 0x5
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+
+#define DRM_PRIME_CAP_IMPORT 0x1
+#define DRM_PRIME_CAP_EXPORT 0x2
+
+/* typedef area */
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+
+#endif /* _DRM_H_ */
diff --git a/usr/src/uts/common/drm/drmP.h b/usr/src/uts/common/drm/drmP.h
new file mode 100644
index 0000000..cc32ddc
--- /dev/null
+++ b/usr/src/uts/common/drm/drmP.h
@@ -0,0 +1,1606 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#ifndef _DRMP_H
+#define _DRMP_H
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/cmn_err.h>
+#include <sys/varargs.h>
+#include <sys/pci.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/agpgart.h>
+#include <sys/time.h>
+#include <sys/gfx_private.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+#include <drm/drm.h>
+#include <drm/drm_os_solaris.h>
+#include "drm_atomic.h"
+#include <sys/queue.h>
+#include "drm_linux.h"
+#include "drm_linux_list.h"
+#include "drm_mm.h"
+#include "drm_mode.h"
+
+#include "drm_sun_idr.h"
+#include "drm_sun_timer.h"
+#include "drm_sun_workqueue.h"
+#include "drm_sun_pci.h"
+
+#ifndef __inline__
+#define __inline__ inline
+#endif
+
+#if !defined(__FUNCTION__)
+#if defined(_STDC_C99) || defined(__C99FEATURES__)
+#define __FUNCTION__ __func__
+#else
+#define __FUNCTION__ " "
+#endif
+#endif
+
+#define KB(x) ((x) * 1024)
+#define MB(x) (KB (KB (x)))
+#define GB(x) (MB (KB (x)))
+
+#define DRM_MINOR_NBITS (9)
+#define DRM_MINOR_SHIFT (0)
+#define DRM_MINOR_MAX ((1 << DRM_MINOR_NBITS) - 1)
+#define DRM_DEV2MINOR(dev) ((getminor(dev) >> DRM_MINOR_SHIFT) & DRM_MINOR_MAX)
+
+#define DRM_CLONEID_NBITS (NBITSMINOR - DRM_MINOR_NBITS)
+#define DRM_CLONEID_SHIFT (DRM_MINOR_NBITS)
+#define DRM_CLONEID_MAX ((1 << DRM_CLONEID_NBITS) - 1)
+#define DRM_DEV2CLONEID(dev) ((getminor(dev) >> DRM_CLONEID_SHIFT) & DRM_CLONEID_MAX)
+
+#define DRM_MAKEDEV(major_id, minor_id, clone_id) \
+ makedevice(major_id, (minor_id << DRM_MINOR_SHIFT) | (clone_id << DRM_CLONEID_SHIFT))
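+
+/*
+ * Illustrative round trip (assumption, following the macros above):
+ *
+ *	dev_t d = DRM_MAKEDEV(major_id, 5, 2);
+ *	ASSERT(DRM_DEV2MINOR(d) == 5 && DRM_DEV2CLONEID(d) == 2);
+ */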
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_REQUIRE_AGP 0x2
+#define DRIVER_USE_MTRR 0x4
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_IRQ_VBL 0x100
+#define DRIVER_DMA_QUEUE 0x200
+#define DRIVER_FB_DMA 0x400
+#define DRIVER_IRQ_VBL2 0x800
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
+#define DRIVER_USE_PLATFORM_DEVICE 0x8000
+
+/* DRM space units */
+#define DRM_PAGE_SHIFT PAGESHIFT
+#define DRM_PAGE_SIZE (1 << DRM_PAGE_SHIFT)
+#define DRM_PAGE_OFFSET (DRM_PAGE_SIZE - 1)
+#define DRM_PAGE_MASK ~(DRM_PAGE_SIZE - 1)
+#define DRM_MB2PAGES(x) ((x) << 8)
+#define DRM_PAGES2BYTES(x) ((x) << DRM_PAGE_SHIFT)
+#define DRM_BYTES2PAGES(x) ((x) >> DRM_PAGE_SHIFT)
+#define DRM_PAGES2KB(x) ((x) << 2)
+#define DRM_ALIGNED(offset) (((offset) & DRM_PAGE_OFFSET) == 0)
+
+#define PAGE_SHIFT DRM_PAGE_SHIFT
+#define PAGE_SIZE DRM_PAGE_SIZE
+
+#define DRM_MAX_INSTANCES 8
+#define DRM_DEVNODE "drm"
+#define DRM_UNOPENED 0
+#define DRM_OPENED 1
+
+#define DRM_HASH_SIZE 16 /* Size of key hash table */
+#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
+
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_BUFS 6
+#define DRM_MEM_SEGS 7
+#define DRM_MEM_PAGES 8
+#define DRM_MEM_FILES 9
+#define DRM_MEM_QUEUES 10
+#define DRM_MEM_CMDS 11
+#define DRM_MEM_MAPPINGS 12
+#define DRM_MEM_BUFLISTS 13
+#define DRM_MEM_DRMLISTS 14
+#define DRM_MEM_TOTALDRM 15
+#define DRM_MEM_BOUNDDRM 16
+#define DRM_MEM_CTXBITMAP 17
+#define DRM_MEM_STUB 18
+#define DRM_MEM_SGLISTS 19
+#define DRM_MEM_AGPLISTS 20
+#define DRM_MEM_CTXLIST 21
+#define DRM_MEM_MM 22
+#define DRM_MEM_HASHTAB 23
+#define DRM_MEM_OBJECTS 24
+
+#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+#define DRM_MAP_HASH_OFFSET 0x10000000
+#define DRM_MAP_HASH_ORDER 12
+#define DRM_OBJECT_HASH_ORDER 12
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#define DRM_MM_INIT_MAX_PAGES 256
+
+
+/* Internal types and structures */
+#define DRM_ARRAY_SIZE(x) (sizeof (x) / sizeof (x[0]))
+#define DRM_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define DRM_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+
+#define __OS_HAS_AGP 1
+
+#define DRM_DEV_MOD (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+
+#define DRM_CURRENTPID ddi_get_pid()
+#define DRM_SPINLOCK(l) mutex_enter(l)
+#define DRM_SPINUNLOCK(u) mutex_exit(u)
+#define DRM_UDELAY(d) udelay(d)
+#define DRM_MEMORYBARRIER()
+
+#define DRM_MINOR_UNASSIGNED 0
+#define DRM_MINOR_LEGACY 1
+#define DRM_MINOR_CONTROL 2
+#define DRM_MINOR_RENDER 3
+#define DRM_MINOR_VGATEXT 4
+#define DRM_MINOR_AGPMASTER 5
+
+#define DRM_MINOR_ID_BASE_LEGACY 0
+#define DRM_MINOR_ID_BASE_CONTROL 64
+#define DRM_MINOR_ID_BASE_RENDER 128
+#define DRM_MINOR_ID_BASE_VGATEXT 384
+#define DRM_MINOR_ID_BASE_AGPMASTER 448
+
+#define DRM_MINOR_ID_LIMIT_LEGACY 63
+#define DRM_MINOR_ID_LIMIT_CONTROL 127
+#define DRM_MINOR_ID_LIMIT_RENDER 383
+#define DRM_MINOR_ID_LIMIT_VGATEXT 447
+#define DRM_MINOR_ID_LIMIT_AGPMASTER 511
+
+#define DRM_MINOR_IS_LEGACY(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_LEGACY && minor_id < DRM_MINOR_ID_LIMIT_LEGACY)
+#define DRM_MINOR_IS_CONTROL(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_CONTROL && minor_id < DRM_MINOR_ID_LIMIT_CONTROL)
+#define DRM_MINOR_IS_RENDER(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_RENDER && minor_id < DRM_MINOR_ID_LIMIT_RENDER)
+#define DRM_MINOR_IS_VGATEXT(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_VGATEXT && minor_id < DRM_MINOR_ID_LIMIT_VGATEXT)
+#define DRM_MINOR_IS_AGPMASTER(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_AGPMASTER && minor_id < DRM_MINOR_ID_LIMIT_AGPMASTER)
+
+/* Legacy VGA regions */
+#define VGA_RSRC_NONE 0x00
+#define VGA_RSRC_LEGACY_IO 0x01
+#define VGA_RSRC_LEGACY_MEM 0x02
+#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
+/* Non-legacy access */
+#define VGA_RSRC_NORMAL_IO 0x04
+#define VGA_RSRC_NORMAL_MEM 0x08
+
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+
+typedef struct drm_file drm_file_t;
+typedef struct drm_device drm_device_t;
+typedef struct drm_driver drm_driver_t;
+
+#define DRM_COPYFROM_WITH_RETURN(dest, src, size) \
+ if (ddi_copyin((src), (dest), (size), 0)) { \
+ DRM_ERROR("copy from user failed"); \
+ return (-EFAULT); \
+ }
+
+#define DRM_COPYTO_WITH_RETURN(dest, src, size) \
+ if (ddi_copyout((src), (dest), (size), 0)) { \
+ DRM_ERROR("copy to user failed"); \
+ return (-EFAULT); \
+ }
+
+#define DRM_COPY_FROM_USER(dest, src, size) \
+ ddi_copyin((src), (dest), (size), 0) /* flag for src */
+
+#define DRM_COPY_TO_USER(dest, src, size) \
+ ddi_copyout((src), (dest), (size), 0) /* flags for dest */
+
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+ ddi_copyin((arg2), (arg1), (arg3), 0)
+
+#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
+ ddi_copyout((arg2), arg1, arg3, 0)
+
+#define DRM_READ8(map, offset) \
+ *(volatile uint8_t *)((uintptr_t)((map)->handle) + (offset))
+#define DRM_READ16(map, offset) \
+ *(volatile uint16_t *)((uintptr_t)((map)->handle) + (offset))
+#define DRM_READ32(map, offset) \
+ *(volatile uint32_t *)((uintptr_t)((map)->handle) + (offset))
+#define DRM_READ64(map, offset) \
+ *(volatile uint64_t *)((uintptr_t)((map)->handle) + (offset))
+#define DRM_WRITE8(map, offset, val) \
+ *(volatile uint8_t *)((uintptr_t)((map)->handle) + (offset)) = (val)
+#define DRM_WRITE16(map, offset, val) \
+ *(volatile uint16_t *)((uintptr_t)((map)->handle) + (offset)) = (val)
+#define DRM_WRITE32(map, offset, val) \
+ *(volatile uint32_t *)((uintptr_t)((map)->handle) + (offset)) = (val)
+#define DRM_WRITE64(map, offset, val) \
+ *(volatile uint64_t *)((uintptr_t)((map)->handle) + (offset)) = (val)
+
+typedef struct drm_wait_queue {
+ kcondvar_t cv;
+ kmutex_t lock;
+} wait_queue_head_t;
+
+#define DRM_INIT_WAITQUEUE(q, pri) \
+{ \
+ mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri); \
+ cv_init(&(q)->cv, NULL, CV_DRIVER, NULL); \
+}
+
+#define DRM_FINI_WAITQUEUE(q) \
+{ \
+ mutex_destroy(&(q)->lock); \
+ cv_destroy(&(q)->cv); \
+}
+
+#define DRM_WAKEUP(q) \
+{ \
+ mutex_enter(&(q)->lock); \
+ cv_broadcast(&(q)->cv); \
+ mutex_exit(&(q)->lock); \
+}
+
+#define DRM_WAIT_ON(ret, q, timeout, condition) \
+ mutex_enter(&(q)->lock); \
+ while (!(condition)) { \
+ ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
+ TR_CLOCK_TICK); \
+ if (ret == -1) { \
+ ret = EBUSY; \
+ break; \
+ } else if (ret == 0) { \
+ ret = EINTR; \
+ break; \
+ } else { \
+ ret = 0; \
+ } \
+ } \
+ mutex_exit(&(q)->lock);
+
+#define DRM_WAIT(ret, q, condition) \
+mutex_enter(&(q)->lock); \
+if (!(condition)) { \
+ (void) cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 3 * DRM_HZ); \
+ ret = 0; \
+ if (!(condition)) { \
+ /* make sure we got what we want */ \
+ if (wait_for(condition, 3000)) \
+ ret = EBUSY; \
+ } \
+} \
+mutex_exit(&(q)->lock);
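+
+/*
+ * Usage sketch (illustrative; the ring/seqno names assume an i915-style
+ * caller): DRM_WAIT_ON() stands in for the Linux
+ * wait_event_interruptible_timeout() pattern, taking a result lvalue, a
+ * queue, a timeout in ticks, and the wake-up condition:
+ *
+ *	int ret;
+ *	DRM_WAIT_ON(ret, &ring->irq_queue, DRM_HZ,
+ *	    ring->get_seqno(ring, B_FALSE) >= seqno);
+ */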
+
+/*
+#define DRM_GETSAREA() \
+{ \
+ drm_local_map_t *map; \
+ TAILQ_FOREACH(map, &dev->maplist, link) { \
+ if (map->type == _DRM_SHM && \
+ map->flags & _DRM_CONTAINS_LOCK) { \
+ dev_priv->sarea = map; \
+ break; \
+ } \
+ } \
+}
+*/
+
+#define wake_up_all DRM_WAKEUP
+
+/**
+ * Test that the hardware lock is held by the caller, returning otherwise.
+ *
+ * \param dev DRM device.
+ * \param filp file pointer of the caller.
+ */
+#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
+do { \
+ if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
+ _file_priv->master->lock.file_priv != _file_priv) { \
+ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+ __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
+ (void *)_file_priv->master->lock.file_priv, (void *)_file_priv); \
+ return -EINVAL; \
+ } \
+} while (*"\0")
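+
+/*
+ * Usage sketch (illustrative): legacy ioctl handlers that touch the
+ * hardware verify lock ownership on entry:
+ *
+ *	LOCK_TEST_WITH_RETURN(dev, file_priv);
+ */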
+
+/**
+ * Copy an IOCTL return string to user space
+ */
+#define DRM_COPY(name, value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ if (DRM_COPY_TO_USER(name, value, len)) \
+ return (EFAULT); \
+ }
+
+#define DRM_IRQ_ARGS caddr_t arg
+#define IRQ_HANDLED DDI_INTR_CLAIMED
+#define IRQ_NONE DDI_INTR_UNCLAIMED
+
+enum {
+ DRM_IS_NOT_AGP,
+ DRM_IS_AGP,
+ DRM_MIGHT_BE_AGP
+};
+
+/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
+#ifndef PCIY_AGP
+#define PCIY_AGP 0x02
+#endif
+
+#ifndef PCIY_EXPRESS
+#define PCIY_EXPRESS 0x10
+#endif
+
+#define PAGE_ALIGN(addr) (((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
+#define DRM_SUSER(p) (crgetsgid(p) == 0 || crgetsuid(p) == 0)
+
+/*
+ * default wait interval: one second (a commented-out 400 ms value remains
+ * below for reference)
+ */
+/* #define DRM_HZ drv_usectohz(400000) */
+#define DRM_HZ drv_usectohz(1000000)
+
+#define DRM_SUPPORT 1
+#define DRM_UNSUPPORT 0
+
+#ifndef offsetof
+#define __offsetof(type, field) ((size_t)(&((type *)0)->field))
+#define offsetof(type, field) __offsetof(type, field)
+#endif
+
+typedef struct drm_pci_id_list {
+ int vendor;
+ int device;
+ unsigned long driver_data;
+} drm_pci_id_list_t;
+
+#define DRM_DEVICE drm_device_t *dev = dev1
+#define DRM_IOCTL_ARGS \
+ dev_t dev_id, struct drm_device *dev, void *data, struct drm_file *file, int ioctl_mode, cred_t *credp
+
+typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
+
+#define DRM_AUTH 0x1
+#define DRM_MASTER 0x2
+#define DRM_ROOT_ONLY 0x4
+#define DRM_CONTROL_ALLOW 0x8
+#define DRM_UNLOCKED 0x10
+
+typedef struct drm_ioctl_desc {
+ drm_ioctl_t *func;
+ int flags;
+ int cmd;
+ unsigned int cmd_drv;
+ const char *name;
+ int (*copyin32)(void *dest, void *src);
+ int (*copyout32)(void *dest, void *src);
+} drm_ioctl_desc_t;
+
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#ifdef _MULTI_DATAMODEL
+#define DRM_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = _copyin32, .copyout32 = _copyout32}
+#else
+#define DRM_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL}
+#endif
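+
+/*
+ * Illustrative sketch (editor's example, not part of the imported code):
+ * a driver ioctl table built with DRM_IOCTL_DEF; the ioctl number,
+ * handler and 32-bit copyin helper named here are hypothetical:
+ *
+ *	static drm_ioctl_desc_t xx_ioctls[] = {
+ *		DRM_IOCTL_DEF(DRM_IOCTL_XX_INIT, xx_init_ioctl,
+ *		    DRM_AUTH | DRM_MASTER, xx_init_copyin32, NULL),
+ *	};
+ */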
+
+typedef struct drm_magic_entry {
+ drm_magic_t magic;
+ struct drm_file *priv;
+ struct drm_magic_entry *next;
+} drm_magic_entry_t;
+
+typedef struct drm_magic_head {
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
+} drm_magic_head_t;
+
+typedef struct drm_buf {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int order; /* log-base-2(total) */
+ int used; /* Amount of buffer in use (for DMA) */
+ unsigned long offset; /* Byte offset (used internally) */
+ void *address; /* Address of buffer */
+ unsigned long bus_address; /* Bus address of buffer */
+ struct drm_buf *next; /* Kernel-only: used for free list */
+ volatile int pending; /* On hardware DMA queue */
+ struct drm_file *file_priv;
+ /* Uniq. identifier of holding process */
+ int context; /* Kernel queue for this buffer */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /* Which list we're on */
+
+ int dev_priv_size; /* Size of buffer private storage */
+ void *dev_private; /* Per-buffer private storage */
+} drm_buf_t;
+
+typedef struct drm_freelist {
+ int initialized; /* Freelist in use */
+ uint32_t count; /* Number of free buffers */
+ drm_buf_t *next; /* End pointer */
+
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+} drm_freelist_t;
+
+typedef struct drm_buf_entry {
+ int buf_size;
+ int buf_count;
+ drm_buf_t *buflist;
+ int seg_count;
+ int page_order;
+
+ uint32_t *seglist;
+ unsigned long *seglist_bus;
+
+ drm_freelist_t freelist;
+} drm_buf_entry_t;
+
+/* Event queued up for userspace to read */
+struct drm_pending_event {
+ struct drm_event *event;
+ struct list_head link;
+ struct drm_file *file_priv;
+ pid_t pid; /* pid of requester, no guarantee it's valid by the time
+ we deliver the event, for tracing only */
+ void (*destroy)(void *event, size_t size);
+};
+
+
+typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+
+/**
+ * Kernel side of a mapping
+ */
+typedef struct drm_local_map {
+ unsigned long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+
+ /* OSOL_drm Begin */
+ ddi_acc_handle_t acc_handle; /**< The data access handle */
+ ddi_umem_cookie_t umem_cookie; /**< For SAREA alloc and free */
+	int callback;	/**< needs callback ops to handle GTT mmap */
+	int gtt_mmap;	/**< GTT mmap has been set up */
+ /* OSOL_drm End */
+} drm_local_map_t;
+
+/**
+ * Mappings list
+ */
+struct drm_map_list {
+ struct list_head head; /**< list head */
+ /* OSOL_drm: struct drm_hash_item hash; */
+ struct drm_local_map *map; /**< mapping */
+ uint64_t user_token;
+ struct drm_master *master;
+ struct drm_mm_node *file_offset_node; /**< fake offset */
+};
+
+struct drm_ctx_list {
+ struct list_head head; /**< list head */
+ drm_context_t handle; /**< context handle */
+ struct drm_file *tag; /**< associated fd private data */
+};
+
+struct drm_history_list {
+ struct list_head head;
+ char info[20]; /**< history info */
+ uint32_t cur_seq; /**< current system seqno */
+ uint32_t last_seq; /**< last seqno */
+ void *ring_ptr; /**< current ring ptr */
+};
+
+struct gem_map_list {
+ struct list_head head; /**< list head */
+ devmap_cookie_t dhp;
+ offset_t mapoffset;
+ size_t maplen;
+};
+
+/*
+ * This structure defines the drm_mm memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+ /* Reference count of this object */
+ struct kref refcount;
+
+ /* Handle count of this object. Each handle also holds a reference */
+ atomic_t handle_count;
+
+ /* Related drm device */
+ struct drm_device *dev;
+
+ /* Mapping info for this object */
+ struct drm_map_list map_list;
+
+ /*
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /*
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /*
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /*
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+
+ struct drm_map_list maplist;
+ ddi_dma_handle_t dma_hdl;
+ ddi_acc_handle_t acc_hdl;
+ caddr_t kaddr;
+ size_t real_size; /* real size of memory */
+ pfn_t *pfnarray;
+ caddr_t gtt_map_kaddr;
+
+ struct gfxp_pmem_cookie mempool_cookie;
+
+ struct list_head seg_list;
+
+ struct list_head track_list; /* for i915 mdb */
+ struct list_head his_list;
+};
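+
+/*
+ * Illustrative usage (editor's example, not part of the imported code):
+ * translating a userspace handle into an object reference and dropping
+ * it again, using the GEM helpers declared later in this header:
+ *
+ *	struct drm_gem_object *obj;
+ *
+ *	obj = drm_gem_object_lookup(dev, file, handle);
+ *	if (obj == NULL)
+ *		return (EINVAL);
+ *	drm_gem_object_unreference_unlocked(obj);
+ */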
+
+typedef struct drm_lock_data {
+ struct drm_hw_lock *hw_lock; /**< Hardware lock */
+ /** Private of lock holder's file (NULL=kernel) */
+ struct drm_file *file_priv;
+ kcondvar_t lock_cv; /* lock queue - SOLARIS Specific */
+ kmutex_t lock_mutex; /* lock - SOLARIS Specific */
+ unsigned long lock_time; /* Time of last lock in clock ticks */
+ uint32_t kernel_waiters;
+ uint32_t user_waiters;
+ int idle_has_lock;
+} drm_lock_data_t;
+
+/*
+ * This structure, in drm_device_t, is always initialized while the device
+ * is open. dev->dma_lock protects the incrementing of dev->buf_use, which
+ * when set marks that no further bufs may be allocated until device teardown
+ * occurs (when the last open of the device has closed). The high/low
+ * watermarks of bufs are only touched by the X Server, and thus not
+ * concurrently accessed, so no locking is needed.
+ */
+
+/**
+ * DMA data.
+ */
+typedef struct drm_device_dma {
+
+ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
+ int buf_count; /**< total number of buffers */
+ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
+ int seg_count;
+ int page_count; /**< number of pages */
+ unsigned long *pagelist; /**< page list */
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02,
+ _DRM_DMA_USE_FB = 0x04,
+ _DRM_DMA_USE_PCI_RO = 0x08
+ } flags;
+
+} drm_device_dma_t;
+
+/**
+ * AGP memory entry. Stored as a doubly linked list.
+ */
+typedef struct drm_agp_mem {
+ unsigned long handle; /**< handle */
+ unsigned long bound; /**< address */
+ int pages;
+ struct list_head head;
+} drm_agp_mem_t;
+
+/**
+ * AGP data.
+ *
+ * \sa drm_agp_init() and drm_device::agp.
+ */
+typedef struct drm_agp_head {
+ agp_info_t agp_info; /**< AGP device information */
+ struct list_head memory;
+ unsigned long mode; /**< AGP mode */
+	int enabled;	/**< whether the AGP bus has been enabled */
+ int acquired; /**< whether the AGP device has been acquired */
+ unsigned long base;
+ int mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+ ldi_ident_t agpgart_li;
+ ldi_handle_t agpgart_lh;
+} drm_agp_head_t;
+
+typedef struct drm_dma_handle {
+ ddi_dma_handle_t dma_hdl;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_cookie_t cookie;
+ uint_t cookie_num;
+ uintptr_t vaddr; /* virtual addr */
+ uintptr_t paddr; /* physical addr */
+ size_t real_sz; /* real size of memory */
+} drm_dma_handle_t;
+
+typedef struct drm_sg_mem {
+ unsigned long handle;
+ void *virtual;
+ int pages;
+ dma_addr_t *busaddr;
+
+ ddi_umem_cookie_t *umem_cookie;
+ drm_dma_handle_t *dmah_sg;
+ drm_dma_handle_t *dmah_gart; /* Handle to PCI memory */
+} drm_sg_mem_t;
+
+typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
+typedef struct drm_vbl_sig {
+ TAILQ_ENTRY(drm_vbl_sig) link;
+ unsigned int sequence;
+ int signo;
+ int pid;
+} drm_vbl_sig_t;
+
+
+/* used for clone device */
+typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
+typedef struct drm_cminor {
+ TAILQ_ENTRY(drm_cminor) link;
+ drm_file_t *fpriv;
+ int minor;
+} drm_cminor_t;
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+typedef struct ati_pcigart_info {
+ int gart_table_location;
+ int is_pcie;
+ void *addr;
+ dma_addr_t bus_addr;
+ drm_local_map_t mapping;
+} drm_ati_pcigart_info;
+
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID (1 << 0)
+#define DRM_SCANOUTPOS_INVBL (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+
+/* DRM device structure */
+struct drm_device;
+
+struct drm_bus {
+ int bus_type;
+ int (*get_irq)(struct drm_device *dev);
+ const char *(*get_name)(struct drm_device *dev);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+ int (*set_unique)(struct drm_device *dev, struct drm_master *master,
+ struct drm_unique *unique);
+ int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
+ /* hooks that are for PCI */
+ int (*agp_init)(struct drm_device *dev);
+
+};
+
+struct drm_driver {
+ int (*load)(struct drm_device *, unsigned long);
+ int (*firstopen)(struct drm_device *);
+ int (*open)(struct drm_device *, drm_file_t *);
+ void (*preclose)(struct drm_device *, drm_file_t *);
+ void (*postclose)(struct drm_device *, drm_file_t *);
+ void (*lastclose)(struct drm_device *);
+ int (*unload)(struct drm_device *);
+ void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
+ int (*presetup)(struct drm_device *);
+ int (*postsetup)(struct drm_device *);
+ int (*open_helper)(struct drm_device *, drm_file_t *);
+ void (*free_filp_priv)(struct drm_device *, drm_file_t *);
+ void (*release)(struct drm_device *, void *);
+ int (*dma_ioctl)(DRM_IOCTL_ARGS);
+ int (*dma_quiescent)(struct drm_device *);
+ int (*dma_flush_block_and_flush)(struct drm_device *,
+ int, drm_lock_flags_t);
+ int (*dma_flush_unblock)(struct drm_device *, int,
+ drm_lock_flags_t);
+ int (*context_dtor)(struct drm_device *, int);
+ int (*device_is_agp) (struct drm_device *);
+
+ /**
+ * Called by vblank timestamping code.
+ *
+ * Return the current display scanout position from a crtc.
+ *
+ * \param dev DRM device.
+ * \param crtc Id of the crtc to query.
+ * \param *vpos Target location for current vertical scanout position.
+ * \param *hpos Target location for current horizontal scanout position.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+	 * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+	 * this flag means that the returned position may be offset by a
+	 * constant but unknown small number of scanlines with respect to
+	 * the real scanout position.
+ *
+ */
+ int (*get_scanout_position) (struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
+
+ /**
+ * Called by \c drm_get_last_vbltimestamp. Should return a precise
+ * timestamp when the most recent VBLANK interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of VBLANK will start scanning out,
+	 * the time immediately after the end of the VBLANK interval. If the
+ * @crtc is currently inside VBLANK, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+	 * \param dev DRM device handle.
+ * \param crtc crtc for which timestamp should be returned.
+ * \param *max_error Maximum allowable timestamp error in nanoseconds.
+ * Implementation should strive to provide timestamp
+ * with an error of at most *max_error nanoseconds.
+	 *		    Returns the true upper bound on the timestamp error.
+ * \param *vblank_time Target location for returned vblank timestamp.
+ * \param flags 0 = Defaults, no special treatment needed.
+	 *	  DRM_CALLED_FROM_VBLIRQ = Function is called from the vblank
+ * irq handler. Some drivers need to apply some workarounds
+ * for gpu-specific vblank irq quirks if flag is set.
+ *
+ * \returns
+ * Zero if timestamping isn't supported in current display mode or a
+ * negative number on failure. A positive status code on success,
+ * which describes how the vblank_time timestamp was computed.
+ */
+ int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+ void (*irq_preinstall)(struct drm_device *);
+ int (*irq_postinstall)(struct drm_device *);
+ void (*irq_uninstall)(struct drm_device *dev);
+
+ uint_t (*irq_handler)(DRM_IRQ_ARGS);
+ int (*vblank_wait)(struct drm_device *, unsigned int *);
+ int (*vblank_wait2)(struct drm_device *, unsigned int *);
+ /* added for intel minimized vblank */
+ u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
+ int (*enable_vblank)(struct drm_device *dev, int crtc);
+ void (*disable_vblank)(struct drm_device *dev, int crtc);
+
+ /* Master routines */
+ int (*master_create)(struct drm_device *dev, struct drm_master *master);
+ void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+ /**
+ * master_set is called whenever the minor master is set.
+ * master_drop is called whenever the minor master is dropped.
+ */
+
+ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_release);
+
+ void (*entervt)(struct drm_device *dev);
+ void (*leavevt)(struct drm_device *dev);
+ void (*agp_support_detect)(struct drm_device *dev, unsigned long flags);
+
+ /*
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+ void (*gem_fault) (struct drm_gem_object *obj);
+
+ /* vga arb irq handler */
+ void (*vgaarb_irq)(struct drm_device *dev, bool state);
+
+ /* dumb alloc support */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
+ int major;
+ int minor;
+ int patchlevel;
+ char *name;
+ char *desc;
+ char *date;
+
+ u32 driver_features;
+ drm_ioctl_desc_t *ioctls;
+ int num_ioctls;
+
+ int buf_priv_size;
+
+ unsigned use_agp :1;
+ unsigned require_agp :1;
+ unsigned use_sg :1;
+ unsigned use_dma :1;
+ unsigned use_pci_dma :1;
+ unsigned use_dma_queue :1;
+ unsigned use_irq :1;
+ unsigned use_vbl_irq :1;
+ unsigned use_vbl_irq2 :1;
+ unsigned use_mtrr :1;
+ unsigned use_gem;
+ unsigned use_kms;
+
+ struct drm_pci_id_list *id_table;
+};
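+
+/*
+ * Illustrative sketch (editor's example, not part of the imported code):
+ * a driver fills in a static drm_driver and hands it to drm_init();
+ * every xx_-prefixed name below is hypothetical:
+ *
+ *	static struct drm_driver xx_driver = {
+ *		.load = xx_load,
+ *		.unload = xx_unload,
+ *		.irq_handler = xx_irq_handler,
+ *		.ioctls = xx_ioctls,
+ *		.num_ioctls = sizeof (xx_ioctls) / sizeof (xx_ioctls[0]),
+ *		.use_gem = 1,
+ *		.name = "xx",
+ *		.desc = "XX graphics driver",
+ *		.major = 1, .minor = 0, .patchlevel = 0,
+ *	};
+ */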
+#include "drm_crtc.h"
+struct drm_master {
+
+ struct kref refcount; /* refcount for this master */
+
+ struct list_head head; /**< each minor contains a list of masters */
+ struct drm_minor *minor; /**< link back to minor we are a master for */
+
+ char *unique; /**< Unique identifier: e.g., busid */
+ int unique_len; /**< Length of unique field */
+ int unique_size; /**< amount allocated */
+
+ int blocked; /**< Blocked due to VC switch? */
+
+ /** \name Authentication */
+ /*@{ */
+ drm_magic_head_t magiclist[DRM_HASH_SIZE];
+ /*@} */
+
+ struct drm_lock_data lock; /**< Information on hardware lock */
+
+ void *driver_priv; /**< Private structure for driver to use */
+};
+
+struct drm_minor {
+ int index; /**< Minor device number */
+ int type; /**< Control or render */
+ dev_t device;
+ struct drm_device *dev;
+ struct drm_master *master; /* currently active master for this node */
+ struct list_head master_list;
+ struct drm_mode_group mode_group;
+
+ char name[32];
+ struct idr clone_idr;
+ void *private;
+};
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+ bool specified;
+ bool refresh_specified;
+ bool bpp_specified;
+ int xres, yres;
+ int bpp;
+ int refresh;
+ bool rb;
+ bool interlace;
+ bool cvt;
+ bool margins;
+ enum drm_connector_force force;
+};
+
+struct drm_pending_vblank_event {
+ struct drm_pending_event base;
+ int pipe;
+ struct drm_event_vblank event;
+};
+
+struct drm_file {
+ TAILQ_ENTRY(drm_file) link;
+
+ int authenticated;
+ pid_t pid;
+ uid_t uid;
+ drm_magic_t magic;
+ unsigned long ioctl_count;
+ struct list_head lhead;
+ struct drm_minor *minor;
+ unsigned long lock_count;
+
+ /** Mapping of mm object handles to object pointers. */
+ struct idr_list object_idr;
+ /** Lock for synchronization of access to object_idr. */
+ spinlock_t table_lock;
+
+ void *driver_priv;
+
+ int is_master; /* this file private is a master for a minor */
+ struct drm_master *master; /* master this node is currently associated with
+ N.B. not always minor->master */
+
+ /**
+ * fbs - List of framebuffers associated with this file.
+ *
+ * Protected by fbs_lock. Note that the fbs list holds a reference on
+ * the fb object to prevent it from untimely disappearing.
+ */
+ struct list_head fbs;
+ struct mutex fbs_lock;
+
+ wait_queue_head_t event_wait;
+ struct list_head event_list;
+ int event_space;
+ struct pollhead drm_pollhead;
+};
+
+/*
+ * hardware-specific code needs to initialize mutexes which
+ * can be used in interrupt context, so they need to know
+ * the interrupt priority. Interrupt cookie in drm_device
+ * structure is the intr_block field.
+ */
+#define DRM_INTR_PRI(dev) \
+ DDI_INTR_PRI((dev)->pdev->intr_block)
+
+struct drm_device {
+ int if_version; /**< Highest interface version set */
+
+ /** \name Locks */
+ /*@{ */
+ spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
+	kmutex_t struct_mutex;	/**< For others */
+ /*@} */
+
+ /** \name Usage Counters */
+ /*@{ */
+ int open_count; /**< Outstanding files open */
+ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /**< Buffer allocation in progress */
+ /*@} */
+
+ /** \name Performance counters */
+ /*@{ */
+ unsigned long counters;
+ enum drm_stat_type types[15];
+ atomic_t counts[15];
+ /*@} */
+
+ struct list_head filelist;
+
+ /** \name Memory management */
+ /*@{ */
+ struct list_head maplist; /**< Linked list of regions */
+ /*@} */
+
+ /** \name Context handle management */
+ /*@{ */
+ struct list_head ctxlist; /**< Linked list of context handles */
+ int ctx_count; /**< Number of context handles */
+ struct mutex ctxlist_mutex; /**< For ctxlist */
+
+ struct idr ctx_idr;
+
+ /*@} */
+
+ /** \name DMA queues (contexts) */
+ /*@{ */
+ struct drm_device_dma *dma; /**< Optional pointer for DMA support */
+ /*@} */
+
+ /** \name Context support */
+ /*@{ */
+ int irq_enabled; /* True if the irq handler is enabled */
+ atomic_t context_flag; /* Context swapping flag */
+ wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
+ int last_context; /* Last current context */
+ unsigned long last_switch; /**< jiffies at last context switch */
+ /*@} */
+
+ struct workqueue_struct *drm_wq;
+ struct work_struct work;
+ /** \name VBLANK IRQ support */
+ /*@{ */
+
+ /*
+ * At load time, disabling the vblank interrupt won't be allowed since
+ * old clients may not call the modeset ioctl and therefore misbehave.
+ * Once the modeset ioctl *has* been called though, we can safely
+ * disable them when unused.
+ */
+ int vblank_disable_allowed;
+
+ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
+ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+ struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+ spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
+ spinlock_t vbl_lock;
+	atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
+ u32 *last_vblank; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ int *vblank_enabled; /* so we don't call enable more than
+ once per disable */
+ int *vblank_inmodeset; /* Display driver is setting mode */
+ u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */
+ struct timer_list vblank_disable_timer;
+
+ u32 max_vblank_count; /**< size of vblank counter register */
+
+ /**
+ * List of events
+ */
+ struct list_head vblank_event_list;
+ spinlock_t event_lock;
+
+ /*@} */
+
+ struct drm_agp_head *agp; /**< AGP data */
+ ulong_t agp_aperbase;
+
+ struct device *dev; /**< Device structure */
+ struct pci_dev *pdev; /**< PCI device structure */
+ int pci_vendor; /**< PCI vendor id */
+ int pci_device; /**< PCI device id */
+
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
+ void *dev_private; /**< device private data */
+ void *mm_private;
+ struct drm_driver *driver;
+ struct drm_local_map *agp_buffer_map;
+ unsigned int agp_buffer_token;
+ struct drm_minor *control; /**< Control node for card */
+ struct drm_minor *primary; /**< render type primary screen head */
+
+ struct drm_mode_config mode_config; /**< Current mode config */
+ struct work_struct output_poll_work;
+ struct timer_list output_poll_timer;
+
+ /* \name GEM information */
+ /* @{ */
+ kmutex_t object_name_lock;
+ struct idr_list object_name_idr;
+ /* @} */
+ int switch_power_state;
+
+ /* OSOL Begin*/
+ dev_info_t *devinfo;
+ struct drm_minor *vgatext;
+ struct drm_minor *agpmaster;
+
+ struct idr map_idr;
+
+ /* Locks */
+ kmutex_t dma_lock; /* protects dev->dma */
+ kmutex_t irq_lock; /* protects irq condition checks */
+
+ kmutex_t page_fault_lock;
+
+ kstat_t *asoft_ksp; /* kstat support */
+
+ struct list_head gem_objects_list;
+ spinlock_t track_lock;
+
+ uint32_t *old_gtt;
+ size_t old_gtt_size;
+ uint32_t *gtt_dump;
+
+ /*
+ * FMA capabilities
+ */
+ int drm_fm_cap;
+ /* OSOL End */
+};
+
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+
+static __inline__ int drm_core_check_feature(struct drm_device *dev,
+ int feature)
+{
+ return ((dev->driver->driver_features & feature) ? 1 : 0);
+}
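+
+/*
+ * Illustrative usage (editor's example, not part of the imported code):
+ * handlers commonly gate on a feature bit before proceeding, e.g. with
+ * the DRIVER_USE_AGP feature flag used just below:
+ *
+ *	if (!drm_core_check_feature(dev, DRIVER_USE_AGP))
+ *		return (ENODEV);
+ */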
+#if __OS_HAS_AGP
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_USE_AGP);
+}
+#else
+#define drm_core_has_AGP(dev) (0)
+#endif
+
+extern struct idr drm_minors_idr;
+extern struct cb_ops drm_cb_ops;
+
+void *drm_alloc(size_t, int);
+void *drm_calloc(size_t, size_t, int);
+void *drm_realloc(void *, size_t, size_t, int);
+void drm_free(void *, size_t, int);
+int drm_ioremap(drm_device_t *, drm_local_map_t *);
+void drm_ioremapfree(drm_local_map_t *);
+void *drm_sun_ioremap(uint64_t paddr, size_t size, uint32_t mode);
+void drm_sun_iounmap(void *addr);
+void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
+void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);
+
+void drm_pci_free(drm_dma_handle_t *);
+void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);
+
+int drm_ctxbitmap_init(drm_device_t *);
+void drm_ctxbitmap_cleanup(drm_device_t *);
+void drm_ctxbitmap_free(drm_device_t *, int);
+
+/* Locking IOCTL support (drm_lock.c) */
+int drm_lock_take(struct drm_lock_data *, unsigned int);
+int drm_lock_free(struct drm_lock_data *, unsigned int);
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
+
+/* Buffer management support (drm_bufs.c) */
+extern int drm_map_handle(struct drm_device *dev, struct drm_map_list *list);
+unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
+unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
+int drm_initmap(drm_device_t *, unsigned long, unsigned long,
+ unsigned int, int, int);
+extern int drm_rmmap(struct drm_device *, struct drm_local_map *);
+extern int drm_addmap(struct drm_device *, unsigned long, unsigned long,
+ enum drm_map_type, enum drm_map_flags, struct drm_local_map **);
+int drm_order(unsigned long);
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
+
+/* DMA support (drm_dma.c) */
+int drm_dma_setup(drm_device_t *);
+void drm_dma_takedown(drm_device_t *);
+void drm_free_buffer(drm_device_t *, drm_buf_t *);
+void drm_core_reclaim_buffers(drm_device_t *, drm_file_t *);
+#define drm_reclaim_buffers drm_core_reclaim_buffers
+
+/* IRQ support (drm_irq.c) */
+int drm_irq_install(drm_device_t *);
+int drm_irq_uninstall(drm_device_t *);
+uint_t drm_irq_handler(DRM_IRQ_ARGS);
+void drm_driver_irq_preinstall(drm_device_t *);
+void drm_driver_irq_postinstall(drm_device_t *);
+void drm_driver_irq_uninstall(drm_device_t *);
+int drm_vblank_wait(drm_device_t *, unsigned int *);
+u32 drm_vblank_count(struct drm_device *dev, int crtc);
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e);
+bool drm_handle_vblank(struct drm_device *dev, int crtc);
+int drm_vblank_get(struct drm_device *dev, int crtc);
+void drm_vblank_put(struct drm_device *dev, int crtc);
+void drm_vblank_off(struct drm_device *dev, int crtc);
+int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+void drm_vblank_cleanup(struct drm_device *dev);
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags);
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ int crtc, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc);
+void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+
+extern struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd);
+
+/* Modesetting support */
+int drm_modeset_ctl(DRM_IOCTL_ARGS);
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+int pci_enable_msi(struct pci_dev *pdev);
+void pci_disable_msi(struct pci_dev *pdev);
+/* AGP/GART support (drm_agpsupport.h) */
+extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern int drm_agp_acquire(struct drm_device *dev);
+extern int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_release(struct drm_device *dev);
+extern int drm_agp_release_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+extern int drm_agp_enable_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+extern int drm_agp_info_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_free_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_bind_ioctl(DRM_IOCTL_ARGS);
+extern void *drm_agp_allocate_memory(size_t pages, uint32_t type, struct drm_device *dev);
+extern int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
+extern int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *dev);
+extern int drm_agp_unbind_memory(unsigned long, drm_device_t *dev);
+extern void drm_agp_chipset_flush(struct drm_device *dev);
+
+extern void drm_agp_cleanup(drm_device_t *);
+extern int drm_agp_bind_pages(struct drm_device *dev, pfn_t *pages,
+ unsigned long num_pages, uint32_t gtt_offset, unsigned int agp_type);
+extern int drm_agp_unbind_pages(struct drm_device *dev, pfn_t *pages,
+ unsigned long num_pages, uint32_t gtt_offset, pfn_t scratch, uint32_t type);
+extern int drm_agp_rw_gtt(struct drm_device *dev, unsigned long num_pages,
+ uint32_t gtt_offset, void *gttp, uint32_t type);
+
+/* kstat support (drm_kstats.c) */
+int drm_init_kstats(drm_device_t *);
+void drm_fini_kstats(drm_device_t *);
+
+/* Scatter Gather Support (drm_scatter.c) */
+extern void drm_sg_cleanup(drm_sg_mem_t *);
+
+/* ATI PCIGART support (ati_pcigart.c) */
+int drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
+int drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);
+
+ /* Locking IOCTL support (drm_lock.h) */
+extern int drm_lock(DRM_IOCTL_ARGS);
+extern int drm_unlock(DRM_IOCTL_ARGS);
+extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+extern void drm_idlelock_release(struct drm_lock_data *lock_data);
+
+int drm_setversion(DRM_IOCTL_ARGS);
+struct drm_local_map *drm_getsarea(struct drm_device *dev);
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
+
+/* Misc. IOCTL support (drm_ioctl.c) */
+int drm_irq_by_busid(DRM_IOCTL_ARGS);
+int drm_getunique(DRM_IOCTL_ARGS);
+int drm_setunique(DRM_IOCTL_ARGS);
+int drm_getmap(DRM_IOCTL_ARGS);
+int drm_getclient(DRM_IOCTL_ARGS);
+int drm_getstats(DRM_IOCTL_ARGS);
+int drm_getcap(DRM_IOCTL_ARGS);
+int drm_noop(DRM_IOCTL_ARGS);
+
+/* Context IOCTL support (drm_context.c) */
+int drm_resctx(DRM_IOCTL_ARGS);
+int drm_addctx(DRM_IOCTL_ARGS);
+int drm_modctx(DRM_IOCTL_ARGS);
+int drm_getctx(DRM_IOCTL_ARGS);
+int drm_switchctx(DRM_IOCTL_ARGS);
+int drm_newctx(DRM_IOCTL_ARGS);
+int drm_rmctx(DRM_IOCTL_ARGS);
+int drm_setsareactx(DRM_IOCTL_ARGS);
+int drm_getsareactx(DRM_IOCTL_ARGS);
+
+/* Drawable IOCTL support (drm_drawable.c) */
+int drm_adddraw(DRM_IOCTL_ARGS);
+int drm_rmdraw(DRM_IOCTL_ARGS);
+int drm_update_drawable_info(DRM_IOCTL_ARGS);
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
+ drm_drawable_t id);
+void drm_drawable_free_all(struct drm_device *dev);
+
+
+ /* Authentication IOCTL support (drm_auth.h) */
+int drm_getmagic(DRM_IOCTL_ARGS);
+int drm_authmagic(DRM_IOCTL_ARGS);
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
+
+/* Buffer management support (drm_bufs.c) */
+int drm_addmap_ioctl(DRM_IOCTL_ARGS);
+int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
+int drm_addbufs(DRM_IOCTL_ARGS);
+int drm_infobufs(DRM_IOCTL_ARGS);
+int drm_markbufs(DRM_IOCTL_ARGS);
+int drm_freebufs(DRM_IOCTL_ARGS);
+int drm_mapbufs(DRM_IOCTL_ARGS);
+
+/* IRQ support (drm_irq.c) */
+int drm_control(DRM_IOCTL_ARGS);
+int drm_wait_vblank(DRM_IOCTL_ARGS);
+
+/* Scatter Gather Support (drm_scatter.c) */
+int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS);
+int drm_sg_free(DRM_IOCTL_ARGS);
+
+extern int drm_debug_flag;
+extern int mdb_track_enable;
+
+extern struct list_head drm_iomem_list;
+
+#define MDB_TRACK_ENABLE mdb_track_enable
+
+#define TRACE_GEM_OBJ_HISTORY(obj, str) \
+do { \
+ if (MDB_TRACK_ENABLE) { \
+ int seq_t = 0; \
+ if (obj->ring) \
+ seq_t = obj->ring->get_seqno(obj->ring, true); \
+ drm_gem_object_track(&obj->base, str, \
+ seq_t, 0, \
+ (void *)obj->ring); \
+ } \
+} while (*"\0")
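+
+/*
+ * Illustrative usage (editor's example, not part of the imported code):
+ * i915 GEM paths tag objects at interesting points; obj here would be a
+ * driver object embedding a drm_gem_object as its base member:
+ *
+ *	TRACE_GEM_OBJ_HISTORY(obj, "wait rendering");
+ */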
+
+/* Functions added to support DRM_DEBUG, DRM_ERROR, DRM_INFO */
+/* PRINTFLIKE4 */
+extern void drm_debug_print(int cmn_err, const char* func, int line, const char *fmt, ...);
+/* PRINTFLIKE1 */
+extern void drm_debug(const char *fmt, ...);
+/* PRINTFLIKE1 */
+extern void drm_error(const char *fmt, ...);
+/* PRINTFLIKE1 */
+extern void drm_info(const char *fmt, ...);
+
+#ifdef DEBUG
+#define DRM_DEBUG(...) \
+ do { \
+ if (drm_debug_flag & 0x08) \
+ drm_debug_print( \
+ CE_NOTE, __func__, __LINE__, ##__VA_ARGS__); \
+ } while (__lintzero)
+#define DRM_DEBUG_KMS(...) \
+ do { \
+ if (drm_debug_flag & 0x04) \
+ drm_debug_print( \
+ CE_NOTE, __func__, __LINE__, ##__VA_ARGS__); \
+ } while (__lintzero)
+#define DRM_DEBUG_DRIVER(...) \
+ do { \
+ if (drm_debug_flag & 0x02) \
+ drm_debug_print( \
+ CE_NOTE, __func__, __LINE__, ##__VA_ARGS__); \
+ } while (__lintzero)
+#define DRM_INFO(...) \
+ do { \
+ if (drm_debug_flag & 0x01) \
+ drm_debug_print( \
+ CE_NOTE, __func__, __LINE__, ##__VA_ARGS__); \
+ } while (__lintzero)
+#else
+#define DRM_DEBUG(...) do { } while (__lintzero)
+#define DRM_DEBUG_KMS(...) do { } while (__lintzero)
+#define DRM_DEBUG_DRIVER(...) do { } while (__lintzero)
+#define DRM_INFO(...) do { } while (__lintzero)
+#endif
+
+#define DRM_ERROR(...) \
+ drm_debug_print(CE_WARN, __func__, __LINE__, ##__VA_ARGS__)
+
+#define DRM_LOG_KMS DRM_INFO
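+
+/*
+ * Illustrative usage (editor's example, not part of the imported code):
+ *
+ *	DRM_DEBUG("pipe %d vblank seq %u", pipe, seq);
+ *	DRM_ERROR("timed out waiting for vblank on pipe %d", pipe);
+ */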
+
+extern int drm_lastclose(struct drm_device *dev);
+
+extern int drm_open(struct drm_minor *, int, int, cred_t *);
+extern int drm_release(struct drm_file *);
+extern ssize_t drm_read(struct drm_file *, struct uio *);
+extern short drm_poll(struct drm_file *, short);
+extern int drm_init(struct drm_device *, struct drm_driver *);
+extern void drm_exit(struct drm_device *);
+extern void drm_fm_init(struct drm_device *dev);
+extern void drm_fm_fini(struct drm_device *dev);
+extern void drm_fm_ereport(struct drm_device *dev, char *detail);
+extern int drm_check_dma_handle(struct drm_device *dev, ddi_dma_handle_t handle);
+extern int drm_check_acc_handle(struct drm_device *dev, ddi_acc_handle_t handle);
+extern int drm_ioctl(dev_t dev_id, struct drm_file *file_priv,
+ int cmd, intptr_t arg, int mode, cred_t *credp);
+
+extern int drm_put_minor(struct drm_minor **minor_p);
+
+extern int drm_setmaster_ioctl(DRM_IOCTL_ARGS);
+extern int drm_dropmaster_ioctl(DRM_IOCTL_ARGS);
+extern struct drm_master *drm_master_create(struct drm_minor *minor);
+extern struct drm_master *drm_master_get(struct drm_master *master);
+extern void drm_master_put(struct drm_master **master);
+extern int drm_get_dev(struct drm_device *dev, struct pci_dev *pdev,
+ struct drm_driver *driver, unsigned long driver_data);
+extern void drm_put_dev(struct drm_device *dev);
+
+void drm_master_release(struct drm_device *dev, struct drm_file *fpriv);
+
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+
+/* sysfs support (drm_sysfs.c) */
+extern int drm_sysfs_device_add(struct drm_minor *minor);
+extern void drm_sysfs_device_remove(struct drm_minor *minor);
+
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
+void drm_gem_object_release(struct drm_gem_object *obj);
+void drm_gem_object_free(struct kref *kref);
+void drm_gem_object_free_unlocked(struct kref *kref);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+ size_t size);
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size, int gen);
+int drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_reference(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_unreference(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
+
+extern void
+drm_gem_object_handle_reference(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj);
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+ struct drm_file *filp,
+ u32 handle);
+int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
+int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
+int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+void drm_gem_mmap(struct drm_gem_object *obj, pfn_t pfn);
+void drm_gem_release_mmap(struct drm_gem_object *obj);
+void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
+
+extern struct drm_local_map *drm_core_findmap(struct drm_device *dev,
+ unsigned int token);
+
+#ifdef NEVER
+static __inline__ int drm_device_is_pcie(struct drm_device *dev)
+{
+ return 0;
+}
+#endif
+
+void drm_gem_object_track(struct drm_gem_object *obj, const char *name,
+ uint32_t cur_seq, uint32_t last_seq, void* ptr);
+
+#endif /* _DRMP_H */
diff --git a/usr/src/uts/common/io/drm/drm_atomic.h b/usr/src/uts/common/drm/drm_atomic.h
index 0adc70c..7c6b7c6 100644
--- a/usr/src/uts/common/io/drm/drm_atomic.h
+++ b/usr/src/uts/common/drm/drm_atomic.h
@@ -1,7 +1,9 @@
+/* BEGIN CSTYLED */
+
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
+
/*
* \file drm_atomic.h
* Atomic operations used in the DRM which may or may not be provided by the OS.
@@ -11,7 +13,7 @@
/*
* Copyright 2004 Eric Anholt
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2012, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -55,14 +57,22 @@ typedef uint32_t atomic_t;
#define atomic_set(p, v) (*(p) = (v))
#define atomic_read(p) (*(p))
-#define atomic_inc(p) atomic_inc_uint(p)
+#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_dec_uint(p)
+#define atomic_dec_and_test(p) \
+ ((0 == atomic_dec_32_nv(p)) ? 1 : 0)
#define atomic_add(n, p) atomic_add_int(p, n)
+#define atomic_add_return(n, p) (atomic_add_int(p, n), *p)
#define atomic_sub(n, p) atomic_add_int(p, -n)
#define atomic_set_int(p, bits) atomic_or_uint(p, bits)
#define atomic_clear_int(p, bits) atomic_and_uint(p, ~(bits))
#define atomic_cmpset_int(p, c, n) \
((c == atomic_cas_uint(p, c, n)) ? 1 : 0)
+#define atomic_clear_mask(mask, p) (*(p) &= ~mask)
+#define atomic_set_mask(mask, p) (*(p) |= mask)
+#define atomic_inc_not_zero(p) \
+ if (atomic_read(p) != 0) \
+ atomic_inc(p);
#define set_bit(b, p) \
atomic_set_int(((volatile uint_t *)(void *)p) + (b >> 5), \
diff --git a/usr/src/uts/common/drm/drm_core.h b/usr/src/uts/common/drm/drm_core.h
new file mode 100644
index 0000000..1ef6fc7
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_core.h
@@ -0,0 +1,49 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_CORE_H__
+#define __DRM_CORE_H__
+
+#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
+
+#define CORE_NAME "drm"
+#define CORE_DESC "DRM shared core routines"
+#define CORE_DATE "20060810"
+
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 4
+
+#define CORE_MAJOR 1
+#define CORE_MINOR 1
+#define CORE_PATCHLEVEL 0
+
+#endif /* __DRM_CORE_H__ */
diff --git a/usr/src/uts/common/drm/drm_crtc.h b/usr/src/uts/common/drm/drm_crtc.h
new file mode 100644
index 0000000..8d254d7
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_crtc.h
@@ -0,0 +1,1068 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_CRTC_H__
+#define __DRM_CRTC_H__
+
+#include <sys/ksynch.h>
+#include "drm.h"
+#include "drmP.h"
+#include "drm_sun_idr.h"
+#include "drm_sun_i2c.h"
+#include "drm_fourcc.h"
+
+struct drm_device;
+struct drm_mode_set;
+struct drm_framebuffer;
+struct drm_object_properties;
+
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+
+struct drm_mode_object {
+ uint32_t id;
+ uint32_t type;
+ struct drm_object_properties *properties;
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 24
+struct drm_object_properties {
+ int count;
+ uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+ uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
+/*
+ * Note on terminology: here, for brevity and convenience, we refer to connector
+ * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
+ * DVI, etc. And 'screen' refers to the whole of the visible display, which
+ * may span multiple monitors (and therefore multiple CRTC and connector
+ * structures).
+ */
+
+enum drm_mode_status {
+ MODE_OK = 0, /* Mode OK */
+ MODE_HSYNC, /* hsync out of range */
+ MODE_VSYNC, /* vsync out of range */
+ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
+	MODE_V_ILLEGAL,	/* mode has illegal vertical timings */
+ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
+	MODE_NOMODE,	/* no mode with a matching name */
+ MODE_NO_INTERLACE, /* interlaced mode not supported */
+ MODE_NO_DBLESCAN, /* doublescan mode not supported */
+ MODE_NO_VSCAN, /* multiscan mode not supported */
+ MODE_MEM, /* insufficient video memory */
+ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
+ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
+ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
+ MODE_NOCLOCK, /* no fixed clock available */
+ MODE_CLOCK_HIGH, /* clock required is too high */
+ MODE_CLOCK_LOW, /* clock required is too low */
+ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
+ MODE_BAD_HVALUE, /* horizontal timing was out of range */
+ MODE_BAD_VVALUE, /* vertical timing was out of range */
+ MODE_BAD_VSCAN, /* VScan value out of range */
+ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
+ MODE_HSYNC_WIDE, /* horizontal sync too wide */
+ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
+ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
+ MODE_VSYNC_NARROW, /* vertical sync too narrow */
+ MODE_VSYNC_WIDE, /* vertical sync too wide */
+ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
+ MODE_VBLANK_WIDE, /* vertical blanking too wide */
+ MODE_PANEL, /* exceeds panel dimensions */
+ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
+ MODE_ONE_WIDTH, /* only one width is supported */
+ MODE_ONE_HEIGHT, /* only one height is supported */
+ MODE_ONE_SIZE, /* only one resolution is supported */
+ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
+	MODE_UNVERIFIED = -3,	/* mode needs to be reverified */
+ MODE_BAD = -2, /* unspecified reason */
+ MODE_ERROR = -1 /* error condition */
+};
+
+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
+ DRM_MODE_TYPE_CRTC_C)
+
+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+ .name = nm, .status = 0, .type = (t), .clock = (c), \
+ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
+ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
+ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
+ .vscan = (vs), .flags = (f), \
+ .base.type = DRM_MODE_OBJECT_MODE
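+
+/*
+ * Illustrative example (editor's note): a VESA 640x480@60Hz timing
+ * expressed with DRM_MODE (25.175 MHz dot clock):
+ *
+ *	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175,
+ *	    640, 656, 752, 800, 0, 480, 490, 492, 525, 0,
+ *	    DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ */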
+
+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+
+struct drm_display_mode {
+ /* Header */
+ struct list_head head;
+ struct drm_mode_object base;
+
+ char name[DRM_DISPLAY_MODE_LEN];
+
+ enum drm_mode_status status;
+ unsigned int type;
+
+ /* Proposed mode values */
+ int clock; /* in kHz */
+ int hdisplay;
+ int hsync_start;
+ int hsync_end;
+ int htotal;
+ int hskew;
+ int vdisplay;
+ int vsync_start;
+ int vsync_end;
+ int vtotal;
+ int vscan;
+ unsigned int flags;
+
+ /* Addressable image size (may be 0 for projectors, etc.) */
+ int width_mm;
+ int height_mm;
+
+ /* Actual mode we give to hw */
+ int clock_index;
+ int synth_clock;
+ int crtc_hdisplay;
+ int crtc_hblank_start;
+ int crtc_hblank_end;
+ int crtc_hsync_start;
+ int crtc_hsync_end;
+ int crtc_htotal;
+ int crtc_hskew;
+ int crtc_vdisplay;
+ int crtc_vblank_start;
+ int crtc_vblank_end;
+ int crtc_vsync_start;
+ int crtc_vsync_end;
+ int crtc_vtotal;
+
+ /* Driver private mode info */
+ int private_size;
+ int *private;
+ int private_flags;
+
+ int vrefresh; /* in Hz */
+ int hsync; /* in kHz */
+};
+
+enum drm_connector_status {
+ connector_status_connected = 1,
+ connector_status_disconnected = 2,
+ connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+ SubPixelUnknown = 0,
+ SubPixelHorizontalRGB,
+ SubPixelHorizontalBGR,
+ SubPixelVerticalRGB,
+ SubPixelVerticalBGR,
+ SubPixelNone,
+};
+
+#define DRM_COLOR_FORMAT_RGB444 (1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
+/*
+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
+ */
+struct drm_display_info {
+ char name[DRM_DISPLAY_INFO_LEN];
+
+ /* Physical size */
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ /* Clock limits FIXME: storage format */
+ unsigned int min_vfreq, max_vfreq;
+ unsigned int min_hfreq, max_hfreq;
+ unsigned int pixel_clock;
+ unsigned int bpc;
+
+ enum subpixel_order subpixel_order;
+ u32 color_formats;
+
+ u8 cea_rev;
+};
+
+struct drm_framebuffer_funcs {
+ /* note: use drm_framebuffer_remove() */
+ void (*destroy)(struct drm_framebuffer *framebuffer);
+ int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+ /**
+	 * Optional callback for the dirty fb ioctl.
+	 *
+	 * Userspace can notify the driver via this callback
+	 * that an area of the framebuffer has changed and should
+ * be flushed to the display hardware.
+ *
+	 * See the documentation for struct drm_mode_fb_dirty_cmd in
+	 * drm_mode.h for more information; its semantics and arguments
+	 * map one to one onto this function.
+ */
+ int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
+};
+
+struct drm_framebuffer {
+ struct drm_device *dev;
+ /*
+ * Note that the fb is refcounted for the benefit of driver internals,
+	 * for example on some hw disabling a CRTC/plane is asynchronous, and
+ * scanout does not actually complete until the next vblank. So some
+ * cleanup (like releasing the reference(s) on the backing GEM bo(s))
+ * should be deferred. In cases like this, the driver would like to
+ * hold a ref to the fb even though it has already been removed from
+ * userspace perspective.
+ */
+ struct kref refcount;
+ /*
+ * Place on the dev->mode_config.fb_list, access protected by
+ * dev->mode_config.fb_lock.
+ */
+ struct list_head head;
+ struct drm_mode_object base;
+ const struct drm_framebuffer_funcs *funcs;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
+ unsigned int width;
+ unsigned int height;
+ /* depth can be 15 or 16 */
+ unsigned int depth;
+ int bits_per_pixel;
+ int flags;
+ uint32_t pixel_format; /* fourcc format */
+ struct list_head filp_head;
+ /* if you are using the helper */
+ void *helper_private;
+};
+
+struct drm_property_blob {
+ struct drm_mode_object base;
+ struct list_head head;
+ unsigned int length;
+ unsigned char data[];
+};
+
+struct drm_property_enum {
+ uint64_t value;
+ struct list_head head;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_property {
+ struct list_head head;
+ struct drm_mode_object base;
+ uint32_t flags;
+ char name[DRM_PROP_NAME_LEN];
+ uint32_t num_values;
+ uint64_t *values;
+
+ struct list_head enum_blob_list;
+};
+
+struct drm_crtc;
+struct drm_connector;
+struct drm_encoder;
+struct drm_pending_vblank_event;
+struct drm_plane;
+
+/**
+ * drm_crtc_funcs - control CRTCs for a given device
+ * @save: save CRTC state
+ * @restore: restore CRTC state
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
+ * @cursor_set: setup the cursor
+ * @cursor_move: move the cursor
+ * @gamma_set: specify color ramp for CRTC
+ * @destroy: deinit and free object
+ * @set_property: called when a property is changed
+ * @set_config: apply a new CRTC configuration
+ * @page_flip: initiate a page flip
+ *
+ * The drm_crtc_funcs structure is the central CRTC management structure
+ * in the DRM. Each CRTC controls one or more connectors (note that the name
+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
+ * connectors, not just CRTs).
+ *
+ * Each driver is responsible for filling out this structure at startup time,
+ * in addition to providing other modesetting features, like i2c and DDC
+ * bus accessors.
+ */
+struct drm_crtc_funcs {
+ /* Save CRTC state */
+ void (*save)(struct drm_crtc *crtc); /* suspend? */
+ /* Restore CRTC state */
+ void (*restore)(struct drm_crtc *crtc); /* resume? */
+ /* Reset CRTC state */
+ void (*reset)(struct drm_crtc *crtc);
+
+ /* cursor controls */
+ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+ int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height,
+ int32_t hot_x, int32_t hot_y);
+ int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
+
+ /* Set gamma on the CRTC */
+ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);
+ /* Object destroy routine */
+ void (*destroy)(struct drm_crtc *crtc);
+
+ int (*set_config)(struct drm_mode_set *set);
+
+ /*
+ * Flip to the given framebuffer. This implements the page
+	 * flip ioctl described in drm_mode.h, specifically, the
+ * implementation must return immediately and block all
+ * rendering to the current fb until the flip has completed.
+ * If userspace set the event flag in the ioctl, the event
+ * argument will point to an event to send back when the flip
+ * completes, otherwise it will be NULL.
+ */
+ int (*page_flip)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+
+ int (*set_property)(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
+};
+
+/**
+ * drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object for ID tracking etc.
+ * @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
+ * @invert_dimensions: for purposes of error checking crtc vs fb sizes,
+ * invert the width/height of the crtc. This is used if the driver
+ * is performing 90 or 270 degree rotated scanout
+ * @x: x position on screen
+ * @y: y position on screen
+ * @funcs: CRTC control functions
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
+ *
+ * Each CRTC may have one or more connectors associated with it. This structure
+ * allows the CRTC to be controlled.
+ */
+struct drm_crtc {
+ struct drm_device *dev;
+ struct list_head head;
+
+ /**
+ * crtc mutex
+ *
+ * This provides a read lock for the overall crtc state (mode, dpms
+	 * state, ...) and a write lock for everything which can be updated
+ * without a full modeset (fb, cursor data, ...)
+ */
+ struct mutex mutex;
+
+ struct drm_mode_object base;
+
+ /* framebuffer the connector is currently bound to */
+ struct drm_framebuffer *fb;
+
+ /* Temporary tracking of the old fb while a modeset is ongoing. Used
+ * by drm_mode_set_config_internal to implement correct refcounting. */
+ struct drm_framebuffer *old_fb;
+
+ bool enabled;
+
+ /* Requested mode from modesetting. */
+ struct drm_display_mode mode;
+
+ /* Programmed mode in hw, after adjustments for encoders,
+ * crtc, panel scaling etc. Needed for timestamping etc.
+ */
+ struct drm_display_mode hwmode;
+
+ bool invert_dimensions;
+
+ int x, y;
+ const struct drm_crtc_funcs *funcs;
+
+ /* CRTC gamma size for reporting to userspace */
+ uint32_t gamma_size;
+ uint16_t *gamma_store;
+
+ /* Constants needed for precise vblank and swap timestamping. */
+ s64 framedur_ns, linedur_ns, pixeldur_ns;
+
+ /* if you are using the helper */
+ void *helper_private;
+
+ struct drm_object_properties properties;
+};
+
+
+/**
+ * drm_connector_funcs - control connectors on a given device
+ * @dpms: set power state (see drm_crtc_funcs above)
+ * @save: save connector state
+ * @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
+ * @detect: is this connector active?
+ * @get_modes: get mode list for this connector
+ * @set_property: property for this connector may need update
+ * @destroy: make object go away
+ * @force: notify the driver the connector is forced on
+ *
+ * Each CRTC may have one or more connectors attached to it. The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+ void (*dpms)(struct drm_connector *connector, int mode);
+ void (*save)(struct drm_connector *connector);
+ void (*restore)(struct drm_connector *connector);
+ void (*reset)(struct drm_connector *connector);
+
+ /* Check to see if anything is attached to the connector.
+ * @force is set to false whilst polling, true when checking the
+ * connector due to user request. @force can be used by the driver
+ * to avoid expensive, destructive operations during automated
+ * probing.
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
+ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+ int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val);
+ void (*destroy)(struct drm_connector *connector);
+ void (*force)(struct drm_connector *connector);
+};
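+
+/*
+ * Illustrative sketch (hypothetical driver code): a detect() hook that
+ * honors the @force contract above by skipping an expensive,
+ * potentially destructive load-detect cycle while the core is merely
+ * polling.
+ */
+#if 0
+static enum drm_connector_status
+example_detect(struct drm_connector *connector, bool force)
+{
+	if (example_hpd_asserted(connector))	/* hypothetical helper */
+		return connector_status_connected;
+	if (!force)
+		return connector_status_unknown;
+	return example_load_detect(connector);	/* hypothetical helper */
+}
+#endif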
+
+/**
+ * drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
+struct drm_encoder_funcs {
+ void (*reset)(struct drm_encoder *encoder);
+ void (*destroy)(struct drm_encoder *encoder);
+};
+
+#define DRM_CONNECTOR_MAX_UMODES 16
+#define DRM_CONNECTOR_LEN 32
+#define DRM_CONNECTOR_MAX_ENCODER 3
+
+/**
+ * drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+ * @crtc: currently bound CRTC
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
+ */
+struct drm_encoder {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+ int encoder_type;
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+
+ struct drm_crtc *crtc;
+ const struct drm_encoder_funcs *funcs;
+ void *helper_private;
+};
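+
+/*
+ * Illustrative sketch (hypothetical driver code): @possible_crtcs is a
+ * bitmask indexed by CRTC position in the mode_config crtc_list, so an
+ * encoder usable from CRTCs 0 and 1 would be set up roughly like this.
+ * DRM_MODE_ENCODER_TMDS is one of the type constants from drm_mode.h.
+ */
+#if 0
+static int example_register_encoder(struct drm_device *dev,
+    struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs)
+{
+	int ret;
+
+	ret = drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TMDS);
+	if (ret == 0)
+		encoder->possible_crtcs = (1 << 0) | (1 << 1);
+	return ret;
+}
+#endif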
+
+enum drm_connector_force {
+ DRM_FORCE_UNSPECIFIED,
+ DRM_FORCE_OFF,
+ DRM_FORCE_ON, /* force on analog part normally */
+ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+#define MAX_ELD_BYTES 128
+
+/**
+ * drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @display_info: information about attached display (e.g. from EDID)
+ * @funcs: connector control functions
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @property_ids: property tracking for this connector
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC. Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+ struct drm_device *dev;
+ //struct device kdev;
+ struct device_attribute *attr;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ int connector_type;
+ int connector_type_id;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ struct list_head modes; /* list of modes on this connector */
+
+ enum drm_connector_status status;
+
+ /* these are modes added by probing with DDC or the BIOS */
+ struct list_head probed_modes;
+
+ struct drm_display_info display_info;
+ const struct drm_connector_funcs *funcs;
+
+ struct drm_property_blob *edid_blob_ptr;
+ struct drm_object_properties properties;
+
+ uint8_t polled; /* DRM_CONNECTOR_POLL_* */
+
+ /* requested DPMS state */
+ int dpms;
+
+ void *helper_private;
+
+ /* forced on connector */
+ enum drm_connector_force force;
+ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ struct drm_encoder *encoder; /* currently active encoder */
+
+ /* EDID bits */
+ uint8_t eld[MAX_ELD_BYTES];
+ bool dvi_dual;
+ int max_tmds_clock; /* in MHz */
+ bool latency_present[2];
+ int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ int audio_latency[2];
+	int null_edid_counter; /* needed to work around some HW bugs where we get all 0s */
+ unsigned bad_edid_counter;
+};
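+
+/*
+ * Illustrative sketch (hypothetical driver code): initializing a
+ * connector and opting in to core-driven polling for hardware without
+ * a hotplug interrupt. DRM_MODE_CONNECTOR_VGA is one of the type
+ * constants from drm_mode.h.
+ */
+#if 0
+static int example_register_connector(struct drm_device *dev,
+    struct drm_connector *connector,
+    const struct drm_connector_funcs *funcs)
+{
+	int ret;
+
+	ret = drm_connector_init(dev, connector, funcs,
+	    DRM_MODE_CONNECTOR_VGA);
+	if (ret == 0)
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+		    DRM_CONNECTOR_POLL_DISCONNECT;
+	return ret;
+}
+#endif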
+
+/**
+ * drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
+ * @set_property: called when a property is changed
+ */
+struct drm_plane_funcs {
+ int (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+ int (*disable_plane)(struct drm_plane *plane);
+ void (*destroy)(struct drm_plane *plane);
+
+ int (*set_property)(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val);
+};
+
+/**
+ * drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @funcs: helper functions
+ * @properties: property tracking for this plane
+ */
+struct drm_plane {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ uint32_t possible_crtcs;
+ uint32_t *format_types;
+ uint32_t format_count;
+
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ const struct drm_plane_funcs *funcs;
+
+ struct drm_object_properties properties;
+};
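+
+/*
+ * Illustrative sketch (hypothetical driver code): registering a plane
+ * that can scan out RGB565 on CRTC 0 only. DRM_FORMAT_RGB565 comes
+ * from drm_fourcc.h; passing false for @priv exposes the plane through
+ * the getplane ioctls.
+ */
+#if 0
+static const uint32_t example_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+};
+
+static int example_register_plane(struct drm_device *dev,
+    struct drm_plane *plane, const struct drm_plane_funcs *funcs)
+{
+	return drm_plane_init(dev, plane, 1 << 0 /* CRTC 0 */, funcs,
+	    example_plane_formats, 1, false);
+}
+#endif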
+
+/**
+ * drm_mode_set - new values for a CRTC config change
+ * @head: list management
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
+ *
+ * Represents a single CRTC, the connectors it drives, the mode to use,
+ * and the framebuffer it scans out from.
+ *
+ * This is used to set modes.
+ */
+struct drm_mode_set {
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ struct drm_display_mode *mode;
+
+ uint32_t x;
+ uint32_t y;
+
+ struct drm_connector **connectors;
+ size_t num_connectors;
+};
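+
+/*
+ * Illustrative sketch (hypothetical driver code): filling in a
+ * drm_mode_set to light up one connector on one CRTC and applying it
+ * with drm_mode_set_config_internal(), declared later in this header.
+ */
+#if 0
+static int example_light_up(struct drm_crtc *crtc,
+    struct drm_connector *connector, struct drm_display_mode *mode,
+    struct drm_framebuffer *fb)
+{
+	struct drm_connector *connectors[1];
+	struct drm_mode_set set;
+
+	connectors[0] = connector;
+	set.fb = fb;
+	set.crtc = crtc;
+	set.mode = mode;
+	set.x = 0;
+	set.y = 0;
+	set.connectors = connectors;
+	set.num_connectors = 1;
+	return drm_mode_set_config_internal(&set);
+}
+#endif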
+
+/**
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
+ */
+struct drm_mode_config_funcs {
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+ void (*output_poll_changed)(struct drm_device *dev);
+};
+
+/**
+ * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state. But in the
+ * future it could allow groups of objects to be set aside into independent
+ * control groups for use by different user level processes (e.g. two X servers
+ * running simultaneously on different heads, each with their own mode
+ * configuration and freedom of mode setting).
+ */
+struct drm_mode_group {
+ uint32_t num_crtcs;
+ uint32_t num_encoders;
+ uint32_t num_connectors;
+
+ /* list of object IDs for this group */
+ uint32_t *id_list;
+};
+
+/**
+ * drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @idr_mutex: mutex for KMS ID allocation and management
+ * @crtc_idr: main KMS ID tracking object
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+ * @connector_list: list of connector objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling status for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @*_property: core property tracking
+ *
+ * Core mode resource tracking structure. All CRTC, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
+ */
+struct drm_mode_config {
+ kmutex_t mutex; /* protects configuration (mode lists etc.) */
+ kmutex_t idr_mutex; /* for IDR management */
+ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+ /**
+ * fb_lock - mutex to protect fb state
+ *
+	 * Besides the global fb list this also protects the fbs list in the
+ * file_priv
+ */
+ struct mutex fb_lock;
+ /* this is limited to one for now */
+ int num_fb;
+ struct list_head fb_list;
+ int num_connector;
+ struct list_head connector_list;
+ int num_encoder;
+ struct list_head encoder_list;
+ int num_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+ struct list_head crtc_list;
+
+ struct list_head property_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ const struct drm_mode_config_funcs *funcs;
+ resource_size_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ bool poll_running;
+
+ /* pointers to standard properties */
+ struct list_head property_blob_list;
+ struct drm_property *edid_property;
+ struct drm_property *dpms_property;
+
+ /* DVI-I properties */
+ struct drm_property *dvi_i_subconnector_property;
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /* TV properties */
+ struct drm_property *tv_subconnector_property;
+ struct drm_property *tv_select_subconnector_property;
+ struct drm_property *tv_mode_property;
+ struct drm_property *tv_left_margin_property;
+ struct drm_property *tv_right_margin_property;
+ struct drm_property *tv_top_margin_property;
+ struct drm_property *tv_bottom_margin_property;
+ struct drm_property *tv_brightness_property;
+ struct drm_property *tv_contrast_property;
+ struct drm_property *tv_flicker_reduction_property;
+ struct drm_property *tv_overscan_property;
+ struct drm_property *tv_saturation_property;
+ struct drm_property *tv_hue_property;
+
+ /* Optional properties */
+ struct drm_property *scaling_mode_property;
+ struct drm_property *dithering_mode_property;
+ struct drm_property *dirty_info_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+};
+
+#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
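+
+/*
+ * Illustrative sketch: the obj_to_*() macros recover the containing
+ * structure from the embedded drm_mode_object that an ID lookup
+ * returns. DRM_MODE_OBJECT_CRTC is the object-type constant from
+ * drm_mode.h; the function itself is hypothetical.
+ */
+#if 0
+static struct drm_crtc *example_lookup_crtc(struct drm_device *dev,
+    uint32_t crtc_id)
+{
+	struct drm_mode_object *obj;
+
+	obj = drm_mode_object_find(dev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	return (obj != NULL) ? obj_to_crtc(obj) : NULL;
+}
+#endif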
+
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
+
+extern void drm_modeset_lock_all(struct drm_device *dev);
+extern void drm_modeset_unlock_all(struct drm_device *dev);
+
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+
+extern int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
+
+extern void drm_connector_cleanup(struct drm_connector *connector);
+/* helper to unplug all connectors from sysfs for device */
+extern void drm_connector_unplug_all(struct drm_device *dev);
+
+extern int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type);
+
+extern int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+extern void drm_plane_force_disable(struct drm_plane *plane);
+
+extern void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+extern const char *drm_get_connector_name(const struct drm_connector *connector);
+extern const char *drm_get_connector_status_name(enum drm_connector_status status);
+extern const char *drm_get_dpms_name(int val);
+extern const char *drm_get_dvi_i_subconnector_name(int val);
+extern const char *drm_get_dvi_i_select_name(int val);
+extern const char *drm_get_tv_subconnector_name(int val);
+extern const char *drm_get_tv_select_name(int val);
+extern void drm_fb_release(struct drm_file *file_priv);
+extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
+extern bool drm_probe_ddc(struct i2c_adapter *adapter);
+extern struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
+extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
+extern void drm_mode_config_cleanup(struct drm_device *dev);
+extern void drm_mode_set_name(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
+
+/* for use by the fb module */
+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+extern void drm_mode_list_concat(struct list_head *head,
+ struct list_head *new);
+extern void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch);
+extern void drm_mode_validate_clocks(struct drm_device *dev,
+ struct list_head *mode_list,
+ int *min, int *max, int n_ranges);
+extern void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose);
+extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_hsync(const struct drm_display_mode *mode);
+extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+ int adjust_flags);
+extern void drm_mode_connector_list_update(struct drm_connector *connector);
+extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid);
+extern int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t val);
+extern int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *value);
+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
+extern void drm_framebuffer_set_object(struct drm_device *dev,
+ unsigned long handle);
+extern int drm_framebuffer_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs);
+extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id);
+extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
+extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
+extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
+extern bool drm_crtc_in_use(struct drm_crtc *crtc);
+
+extern void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val);
+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values);
+extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max);
+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+extern int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name);
+extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
+ char *formats[]);
+extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
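+
+/*
+ * Illustrative sketch (hypothetical names and values): creating an
+ * enum property and attaching it to a connector with a default value,
+ * after which it is visible through the property ioctls below.
+ */
+#if 0
+static const struct drm_prop_enum_list example_enum_list[] = {
+	{ 0, "off" },
+	{ 1, "on" },
+};
+
+static void example_attach_property(struct drm_device *dev,
+    struct drm_connector *connector)
+{
+	struct drm_property *prop;
+
+	prop = drm_property_create_enum(dev, 0, "example-mode",
+	    example_enum_list, 2);
+	if (prop != NULL)
+		drm_object_attach_property(&connector->base, prop, 0);
+}
+#endif
+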
+extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
+
+extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
+extern int drm_mode_set_config_internal(struct drm_mode_set *set);
+/* IOCTLs */
+extern int drm_mode_getresources(DRM_IOCTL_ARGS);
+extern int drm_mode_getplane_res(DRM_IOCTL_ARGS);
+extern int drm_mode_getcrtc(DRM_IOCTL_ARGS);
+extern int drm_mode_getconnector(DRM_IOCTL_ARGS);
+extern int drm_mode_setcrtc(DRM_IOCTL_ARGS);
+extern int drm_mode_getplane(DRM_IOCTL_ARGS);
+extern int drm_mode_setplane(DRM_IOCTL_ARGS);
+extern int drm_mode_cursor_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_cursor2_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_addfb(DRM_IOCTL_ARGS);
+extern int drm_mode_addfb2(DRM_IOCTL_ARGS);
+extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
+extern int drm_mode_rmfb(DRM_IOCTL_ARGS);
+extern int drm_mode_getfb(DRM_IOCTL_ARGS);
+extern int drm_mode_dirtyfb_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_getproperty_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_getblob_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_connector_property_set_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_hotplug_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_replacefb(DRM_IOCTL_ARGS);
+extern int drm_mode_getencoder(DRM_IOCTL_ARGS);
+extern int drm_mode_gamma_get_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_gamma_set_ioctl(DRM_IOCTL_ARGS);
+extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern bool drm_detect_monitor_audio(struct edid *edid);
+extern bool drm_rgb_quant_range_selectable(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(DRM_IOCTL_ARGS);
+extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins);
+extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins);
+extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins, int GTF_M,
+ int GTF_2C, int GTF_K, int GTF_2J);
+extern int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
+
+extern int drm_edid_header_is_valid(const u8 *raw_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
+extern bool drm_edid_is_valid(struct edid *edid);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb);
+
+extern int drm_mode_create_dumb_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_mmap_dumb_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_destroy_dumb_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_obj_get_properties_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_obj_set_property_ioctl(DRM_IOCTL_ARGS);
+
+extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp);
+extern int drm_format_num_planes(uint32_t format);
+extern int drm_format_plane_cpp(uint32_t format, int plane);
+extern int drm_format_horz_chroma_subsampling(uint32_t format);
+extern int drm_format_vert_chroma_subsampling(uint32_t format);
+extern const char *drm_get_format_name(uint32_t format);
+
+#endif /* __DRM_CRTC_H__ */
diff --git a/usr/src/uts/common/drm/drm_crtc_helper.h b/usr/src/uts/common/drm/drm_crtc_helper.h
new file mode 100644
index 0000000..af826a2
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_crtc_helper.h
@@ -0,0 +1,176 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The DRM mode setting helper functions are common code for drivers to use if
+ * they wish. Drivers are not forced to use this code in their
+ * implementations, but it would be useful if the code they do use at least
+ * provides a consistent interface and operation to userspace.
+ */
+
+#ifndef __DRM_CRTC_HELPER_H__
+#define __DRM_CRTC_HELPER_H__
+
+#include <sys/types.h>
+#include "drmP.h"
+#include "drm_sun_idr.h"
+
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
+/**
+ * drm_crtc_helper_funcs - helper operations for CRTCs
+ * @mode_fixup: try to fixup proposed mode for this CRTC
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_crtc_helper_funcs {
+ /*
+ * Control power levels on the CRTC. If the mode passed in is
+ * unsupported, the provider must use the next lowest power level.
+ */
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ void (*prepare)(struct drm_crtc *crtc);
+ void (*commit)(struct drm_crtc *crtc);
+
+ /* Provider can fixup or change mode timings before modeset occurs */
+ bool (*mode_fixup)(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /* Actually set the mode */
+ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+	/* Move the crtc to the given position on the current fb (optional) */
+ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+ int (*mode_set_base_atomic)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic);
+
+ /* reload the current crtc LUT */
+ void (*load_lut)(struct drm_crtc *crtc);
+
+ /* disable crtc when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_crtc *crtc);
+};
+
+/**
+ * drm_encoder_helper_funcs - helper operations for encoders
+ * @mode_fixup: try to fixup proposed mode for this encoder
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_encoder_helper_funcs {
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+ void (*save)(struct drm_encoder *encoder);
+ void (*restore)(struct drm_encoder *encoder);
+
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*prepare)(struct drm_encoder *encoder);
+ void (*commit)(struct drm_encoder *encoder);
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
+ /* detect for DAC style encoders */
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /* disable encoder when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_encoder *encoder);
+};
+
+/**
+ * drm_connector_helper_funcs - helper operations for connectors
+ * @get_modes: get mode list for this connector
+ * @mode_valid: is this mode valid on the given connector?
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_connector_helper_funcs {
+ int (*get_modes)(struct drm_connector *connector);
+ int (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+ struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+};
+
+extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
+extern void drm_helper_disable_unused_functions(struct drm_device *dev);
+extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
+extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
+
+extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+ const struct drm_crtc_helper_funcs *funcs)
+{
+ crtc->helper_private = (void *)funcs;
+}
+
+static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
+ const struct drm_encoder_helper_funcs *funcs)
+{
+ encoder->helper_private = (void *)funcs;
+}
+
+static inline void drm_connector_helper_add(struct drm_connector *connector,
+ const struct drm_connector_helper_funcs *funcs)
+{
+ connector->helper_private = (void *)funcs;
+}
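+
+/*
+ * Illustrative sketch (hypothetical example_* functions): wiring
+ * connector helpers so drm_helper_probe_single_connector_modes() can
+ * build the mode list via get_modes()/mode_valid() and route the
+ * connector via best_encoder().
+ */
+#if 0
+static const struct drm_connector_helper_funcs example_connector_helpers = {
+	.get_modes = example_get_modes,
+	.mode_valid = example_mode_valid,
+	.best_encoder = example_best_encoder,
+};
+/* at init time: drm_connector_helper_add(connector, &example_connector_helpers); */
+#endif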
+
+extern void drm_helper_resume_force_mode(struct drm_device *dev);
+extern void drm_kms_helper_poll_init(struct drm_device *dev);
+extern void drm_kms_helper_poll_fini(struct drm_device *dev);
+extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
+
+extern void drm_kms_helper_poll_disable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+
+#endif /* __DRM_CRTC_HELPER_H__ */
diff --git a/usr/src/uts/common/drm/drm_dp_helper.h b/usr/src/uts/common/drm/drm_dp_helper.h
new file mode 100644
index 0000000..55c8e85
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_dp_helper.h
@@ -0,0 +1,368 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+#include "drm_linux.h"
+#include "drm_sun_i2c.h"
+
+/*
+ * Unless otherwise noted, all values are from the DP 1.1a spec. Note that
+ * DP and DPCD versions are independent. Differences from 1.0 are not noted,
+ * 1.0 devices basically don't exist in the wild.
+ *
+ * Abbreviations, in chronological order:
+ *
+ * eDP: Embedded DisplayPort version 1
+ * DPI: DisplayPort Interoperability Guideline v1.1a
+ * 1.2: DisplayPort 1.2
+ *
+ * 1.2 formally includes both eDP and DPI definitions.
+ */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
+
+/* AUX CH addresses */
+/* DPCD */
+#define DP_DPCD_REV 0x000
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_TPS3_SUPPORTED (1 << 6)
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+/* 00b = DisplayPort */
+/* 01b = Analog */
+/* 10b = TMDS or HDMI */
+/* 11b = Other */
+# define DP_FORMAT_CONVERSION (1 << 3)
+# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+
+#define DP_DOWN_STREAM_PORT_COUNT 0x007
+# define DP_PORT_COUNT_MASK 0x0f
+# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
+# define DP_OUI_SUPPORT (1 << 7)
+
+#define DP_I2C_SPEED_CAP 0x00c /* DPI */
+# define DP_I2C_SPEED_1K 0x01
+# define DP_I2C_SPEED_5K 0x02
+# define DP_I2C_SPEED_10K 0x04
+# define DP_I2C_SPEED_100K 0x08
+# define DP_I2C_SPEED_400K 0x10
+# define DP_I2C_SPEED_1M 0x20
+
+#define DP_EDP_CONFIGURATION_CAP 0x00d
+#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
+
+/* Multiple stream transport */
+#define DP_MSTM_CAP 0x021 /* 1.2 */
+# define DP_MST_CAP (1 << 0)
+
+#define DP_PSR_SUPPORT 0x070
+# define DP_PSR_IS_SUPPORTED 1
+#define DP_PSR_CAPS 0x071
+# define DP_PSR_NO_TRAIN_ON_EXIT 1
+# define DP_PSR_SETUP_TIME_330 (0 << 1)
+# define DP_PSR_SETUP_TIME_275 (1 << 1)
+# define DP_PSR_SETUP_TIME_220 (2 << 1)
+# define DP_PSR_SETUP_TIME_165 (3 << 1)
+# define DP_PSR_SETUP_TIME_110 (4 << 1)
+# define DP_PSR_SETUP_TIME_55 (5 << 1)
+# define DP_PSR_SETUP_TIME_0 (6 << 1)
+# define DP_PSR_SETUP_TIME_MASK (7 << 1)
+# define DP_PSR_SETUP_TIME_SHIFT 1
+
+/*
+ * 0x80-0x8f describe downstream port capabilities, but there are two layouts
+ * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
+ * each port's descriptor is one byte wide. If it was set, each port's is
+ * four bytes wide, starting with the one byte from the base info. As of
+ * DP interop v1.1a only VGA defines additional detail.
+ */
+
+/* offset 0 */
+#define DP_DOWNSTREAM_PORT_0 0x80
+# define DP_DS_PORT_TYPE_MASK (7 << 0)
+# define DP_DS_PORT_TYPE_DP 0
+# define DP_DS_PORT_TYPE_VGA 1
+# define DP_DS_PORT_TYPE_DVI 2
+# define DP_DS_PORT_TYPE_HDMI 3
+# define DP_DS_PORT_TYPE_NON_EDID 4
+# define DP_DS_PORT_HPD (1 << 3)
+/* offset 1 for VGA is maximum megapixels per second / 8 */
+/* offset 2 */
+# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
+# define DP_DS_VGA_8BPC 0
+# define DP_DS_VGA_10BPC 1
+# define DP_DS_VGA_12BPC 2
+# define DP_DS_VGA_16BPC 3
+
+/* link configuration */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+# define DP_LINK_BW_5_4 0x14
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_3 3
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
+/* bitmask as for DP_I2C_SPEED_CAP */
+
+#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
+
+#define DP_MSTM_CTRL 0x111 /* 1.2 */
+# define DP_MST_EN (1 << 0)
+# define DP_UP_REQ_EN (1 << 1)
+# define DP_UPSTREAM_IS_SRC (1 << 2)
+
+#define DP_PSR_EN_CFG 0x170
+# define DP_PSR_ENABLE (1 << 0)
+# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
+# define DP_PSR_CRC_VERIFICATION (1 << 2)
+# define DP_PSR_FRAME_CAPTURE (1 << 3)
+
+#define DP_SINK_COUNT 0x200
+/* prior to DP 1.2, bit 7 was reserved and must be zero */
+# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
+# define DP_SINK_CP_READY (1 << 6)
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
+# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
+# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
+# define DP_CP_IRQ (1 << 2)
+# define DP_SINK_SPECIFIC_IRQ (1 << 6)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+ DP_LANE_CHANNEL_EQ_DONE | \
+ DP_LANE_SYMBOL_LOCKED)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+#define DP_TEST_REQUEST 0x218
+# define DP_TEST_LINK_TRAINING (1 << 0)
+# define DP_TEST_LINK_PATTERN (1 << 1)
+# define DP_TEST_LINK_EDID_READ (1 << 2)
+# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
+
+#define DP_TEST_LINK_RATE 0x219
+# define DP_LINK_RATE_162 (0x6)
+# define DP_LINK_RATE_27 (0xa)
+
+#define DP_TEST_LANE_COUNT 0x220
+
+#define DP_TEST_PATTERN 0x221
+
+#define DP_TEST_RESPONSE 0x260
+# define DP_TEST_ACK (1 << 0)
+# define DP_TEST_NAK (1 << 1)
+# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+
+#define DP_SOURCE_OUI 0x300
+#define DP_SINK_OUI 0x400
+#define DP_BRANCH_OUI 0x500
+
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+
+#define DP_PSR_ERROR_STATUS 0x2006
+# define DP_PSR_LINK_CRC_ERROR (1 << 0)
+# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+
+#define DP_PSR_ESI 0x2007
+# define DP_PSR_CAPS_CHANGE (1 << 0)
+
+#define DP_PSR_STATUS 0x2008
+# define DP_PSR_SINK_INACTIVE 0
+# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
+# define DP_PSR_SINK_ACTIVE_RFB 2
+# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
+# define DP_PSR_SINK_ACTIVE_RESYNC 4
+# define DP_PSR_SINK_INTERNAL_ERROR 7
+# define DP_PSR_SINK_STATE_MASK 0x07
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ * aux algorithm
+ * @running: set by the algo indicating whether an i2c transfer is ongoing
+ * or whether the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
+struct i2c_algo_dp_aux_data {
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_adapter *adapter,
+ int mode, uint8_t write_byte,
+ uint8_t *read_byte);
+};
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
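+
+/*
+ * Illustrative sketch (hypothetical driver code): the algo data is
+ * attached to the adapter before registering the i2c-over-AUX bus;
+ * example_aux_ch() would implement single-byte transfers using the
+ * MODE_I2C_* flags above. The algo_data field is assumed to follow
+ * the Linux i2c_adapter layout mirrored by drm_sun_i2c.h.
+ */
+#if 0
+static struct i2c_algo_dp_aux_data example_algo_data = {
+	.running = false,
+	.address = 0,
+	.aux_ch = example_aux_ch,
+};
+
+static int example_add_aux_bus(struct i2c_adapter *adapter)
+{
+	adapter->algo_data = &example_algo_data;
+	return i2c_dp_aux_add_bus(adapter);
+}
+#endif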
+
+#define DP_LINK_STATUS_SIZE 6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+
+#define DP_RECEIVER_CAP_SIZE 0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+int drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
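+
+/*
+ * Illustrative sketch (hypothetical caller): deriving the link budget
+ * from a cached copy of the first DPCD bytes. The rate is in kHz under
+ * the usual bw-code-times-27000 convention of
+ * drm_dp_bw_code_to_link_rate().
+ */
+#if 0
+static void example_read_caps(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	int max_rate = drm_dp_max_link_rate(dpcd);	/* kHz */
+	u8 max_lanes = drm_dp_max_lane_count(dpcd);	/* 1, 2 or 4 */
+	bool enhanced = (dpcd[DP_MAX_LANE_COUNT] &
+	    DP_ENHANCED_FRAME_CAP) != 0;
+}
+#endif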
+
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/usr/src/uts/common/drm/drm_edid.h b/usr/src/uts/common/drm/drm_edid.h
new file mode 100644
index 0000000..4622515
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_edid.h
@@ -0,0 +1,295 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_EDID_H__
+#define __DRM_EDID_H__
+
+#include "drm.h"
+#include "drmP.h"
+#define EDID_LENGTH 128
+#define DDC_ADDR 0x50
+
+#define CEA_EXT 0x02
+#define VTB_EXT 0x10
+#define DI_EXT 0x40
+#define LS_EXT 0x50
+#define MI_EXT 0x60
+
+#pragma pack(1)
+struct est_timings {
+ u8 t1;
+ u8 t2;
+ u8 mfg_rsvd;
+} __attribute__((packed));
+#pragma pack()
+
+/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+#define EDID_TIMING_ASPECT_SHIFT 6
+#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
+
+/* need to add 60 */
+#define EDID_TIMING_VFREQ_SHIFT 0
+#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
+
+#pragma pack(1)
+struct std_timing {
+ u8 hsize; /* need to multiply by 8 then add 248 */
+ u8 vfreq_aspect;
+} __attribute__((packed));
+#pragma pack()
+
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
+#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
+#define DRM_EDID_PT_STEREO (1 << 5)
+#define DRM_EDID_PT_INTERLACED (1 << 7)
+
+/* If detailed data is pixel timing */
+#pragma pack(1)
+struct detailed_pixel_timing {
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hactive_hblank_hi;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vactive_vblank_hi;
+ u8 hsync_offset_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_offset_pulse_width_lo;
+ u8 hsync_vsync_offset_pulse_width_hi;
+ u8 width_mm_lo;
+ u8 height_mm_lo;
+ u8 width_height_mm_hi;
+ u8 hborder;
+ u8 vborder;
+ u8 misc;
+} __attribute__((packed));
+#pragma pack()
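+
+/*
+ * Illustrative sketch: each active/blanking value is split between a
+ * low byte and a shared high nibble, so the horizontal counts are
+ * recovered from a struct detailed_pixel_timing like this.
+ */
+#if 0
+static void example_decode_h(const struct detailed_pixel_timing *pt,
+    unsigned int *hactive, unsigned int *hblank)
+{
+	*hactive = pt->hactive_lo |
+	    ((unsigned int)(pt->hactive_hblank_hi & 0xf0) << 4);
+	*hblank = pt->hblank_lo |
+	    ((unsigned int)(pt->hactive_hblank_hi & 0x0f) << 8);
+}
+#endif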
+
+/* If it's not pixel timing, it'll be one of the below */
+#pragma pack(1)
+struct detailed_data_string {
+ u8 str[13];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct detailed_data_monitor_range {
+ u8 min_vfreq;
+ u8 max_vfreq;
+ u8 min_hfreq_khz;
+ u8 max_hfreq_khz;
+ u8 pixel_clock_mhz; /* need to multiply by 10 */
+ u8 flags;
+ union {
+ struct {
+ u8 reserved;
+ u8 hfreq_start_khz; /* need to multiply by 2 */
+ u8 c; /* need to divide by 2 */
+ __u16 m;
+ u8 k;
+ u8 j; /* need to divide by 2 */
+ } __attribute__((packed)) gtf2;
+ struct {
+ u8 version;
+ u8 data1; /* high 6 bits: extra clock resolution */
+ u8 data2; /* plus low 2 of above: max hactive */
+ u8 supported_aspects;
+ u8 flags; /* preferred aspect and blanking support */
+ u8 supported_scalings;
+ u8 preferred_refresh;
+ } __attribute__((packed)) cvt;
+ } formula;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct detailed_data_wpindex {
+ u8 white_yx_lo; /* Lower 2 bits each */
+ u8 white_x_hi;
+ u8 white_y_hi;
+ u8 gamma; /* need to divide by 100 then add 1 */
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct detailed_data_color_point {
+ u8 windex1;
+ u8 wpindex1[3];
+ u8 windex2;
+ u8 wpindex2[3];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct cvt_timing {
+ u8 code[3];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct detailed_non_pixel {
+ u8 pad1;
+ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
+ fb=color point data, fa=standard timing data,
+ f9=undefined, f8=mfg. reserved */
+ u8 pad2;
+ union {
+ struct detailed_data_string str;
+ struct detailed_data_monitor_range range;
+ struct detailed_data_wpindex color;
+ struct std_timing timings[5];
+ struct cvt_timing cvt[4];
+ } data;
+} __attribute__((packed));
+#pragma pack()
+
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
+#define EDID_DETAIL_STD_MODES 0xfa
+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
+#define EDID_DETAIL_MONITOR_NAME 0xfc
+#define EDID_DETAIL_MONITOR_RANGE 0xfd
+#define EDID_DETAIL_MONITOR_STRING 0xfe
+#define EDID_DETAIL_MONITOR_SERIAL 0xff
+
+#pragma pack(1)
+struct detailed_timing {
+ __u16 pixel_clock; /* need to multiply by 10 KHz */
+ union {
+ struct detailed_pixel_timing pixel_data;
+ struct detailed_non_pixel other_data;
+ } data;
+} __attribute__((packed));
+#pragma pack()
+
+#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
+#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
+#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
+#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
+#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
+#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
+#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */
+#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
+#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
+#define DRM_EDID_DIGITAL_TYPE_DVI (1)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
+#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
+#define DRM_EDID_DIGITAL_TYPE_DP (5)
+
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
+#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
+#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
+/* If digital */
+#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
+#define DRM_EDID_FEATURE_RGB (0 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
+
+#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
+#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
+#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
+
+#pragma pack(1)
+struct edid {
+ u8 header[8];
+ /* Vendor & product info */
+ u8 mfg_id[2];
+ u8 prod_code[2];
+ __u32 serial; /* FIXME: byte order */
+ u8 mfg_week;
+ u8 mfg_year;
+ /* EDID version */
+ u8 version;
+ u8 revision;
+ /* Display info: */
+ u8 input;
+ u8 width_cm;
+ u8 height_cm;
+ u8 gamma;
+ u8 features;
+ /* Color characteristics */
+ u8 red_green_lo;
+ u8 black_white_lo;
+ u8 red_x;
+ u8 red_y;
+ u8 green_x;
+ u8 green_y;
+ u8 blue_x;
+ u8 blue_y;
+ u8 white_x;
+ u8 white_y;
+	/* Est. timings and mfg rsvd timings */
+ struct est_timings established_timings;
+ /* Standard timings 1-8*/
+ struct std_timing standard_timings[8];
+	/* Detailed timings 1-4 */
+ struct detailed_timing detailed_timings[4];
+ /* Number of 128 byte ext. blocks */
+ u8 extensions;
+ /* Checksum */
+ u8 checksum;
+} __attribute__((packed));
+#pragma pack()
+
+#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
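+
+/*
+ * Illustrative sketch: an EDID base or extension block is only valid
+ * if its 128 bytes sum to zero mod 256; this is one of the checks
+ * drm_edid_block_valid() performs.
+ */
+#if 0
+static bool example_block_checksum_ok(const u8 *raw_block)
+{
+	u8 sum = 0;
+	int i;
+
+	for (i = 0; i < EDID_LENGTH; i++)
+		sum += raw_block[i];
+	return (sum == 0);
+}
+#endif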
+
+/* maximum number of EDID extension blocks */
+#define DRM_MAX_EDID_EXT_NUM 4
+
+/* Short Audio Descriptor */
+struct cea_sad {
+ u8 format;
+ u8 channels; /* max number of channels - 1 */
+ u8 freq;
+ u8 byte2; /* meaning depends on format */
+};
+
+struct drm_encoder;
+struct drm_connector;
+struct drm_display_mode;
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+
+#endif /* __DRM_EDID_H__ */
diff --git a/usr/src/uts/common/drm/drm_fb_helper.h b/usr/src/uts/common/drm/drm_fb_helper.h
new file mode 100644
index 0000000..ec8d330
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_fb_helper.h
@@ -0,0 +1,121 @@
+/* BEGIN CSTYLED */
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008, 2012, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#ifndef _DRM_FB_HELPER_H
+#define _DRM_FB_HELPER_H
+
+struct drm_fb_helper;
+
+struct drm_fb_helper_crtc {
+ struct drm_mode_set mode_set;
+ struct drm_display_mode *desired_mode;
+};
+
+struct drm_fb_helper_surface_size {
+ u32 fb_width;
+ u32 fb_height;
+ u32 surface_width;
+ u32 surface_height;
+ u32 surface_bpp;
+ u32 surface_depth;
+};
+
+/**
+ * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
+ * @gamma_set: - Set the given gamma lut register on the given crtc.
+ * @gamma_get: - Read the given gamma lut register on the given crtc, used to
+ * save the current lut when force-restoring the fbdev for e.g.
+ * kdbg.
+ * @fb_probe: - Driver callback to allocate and initialize the fbdev info
+ * structure. Furthermore it also needs to allocate the drm
+ * framebuffer used to back the fbdev.
+ *
+ * Driver callbacks used by the fbdev emulation helper library.
+ */
+struct drm_fb_helper_funcs {
+ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ int (*fb_probe)(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+ bool (*initial_config)(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height);
+};
+
+struct drm_fb_helper_connector {
+ struct drm_connector *connector;
+ struct drm_cmdline_mode cmdline_mode;
+};
+
+struct drm_fb_helper {
+ struct drm_framebuffer *fb;
+ struct drm_device *dev;
+ int crtc_count;
+ struct drm_fb_helper_crtc *crtc_info;
+ int connector_count;
+ struct drm_fb_helper_connector **connector_info;
+ struct drm_fb_helper_funcs *funcs;
+ struct fb_info *fbdev;
+ u32 pseudo_palette[17];
+ struct list_head kernel_fb_list;
+
+	/* we got a hotplug but fbdev wasn't running the console;
+	   delay until next set_par */
+ bool delayed_hotplug;
+};
+
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
+ int preferred_bpp);
+
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn);
+void drm_fb_helper_fini(struct drm_fb_helper *helper);
+
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_force_kernel_mode(void);
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth);
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+void drm_register_fbops(struct drm_device *dev);
+int drm_getfb_size(struct drm_device *dev);
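+
+/*
+ * Illustrative sketch (hypothetical driver code): the usual fbdev
+ * bring-up order with these helpers. The helper's funcs pointer,
+ * including fb_probe, must be filled in by the driver beforehand.
+ */
+#if 0
+static void example_fbdev_setup(struct drm_device *dev,
+    struct drm_fb_helper *fb_helper, int num_crtcs, int max_connectors)
+{
+	drm_fb_helper_init(dev, fb_helper, num_crtcs, max_connectors);
+	drm_fb_helper_single_add_all_connectors(fb_helper);
+	drm_fb_helper_initial_config(fb_helper, 32 /* preferred bpp */);
+}
+#endif
+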
+#endif /* _DRM_FB_HELPER_H */
diff --git a/usr/src/uts/common/drm/drm_fourcc.h b/usr/src/uts/common/drm/drm_fourcc.h
new file mode 100644
index 0000000..7af2285
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_fourcc.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include <inttypes.h>
+
+#define fourcc_code(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
+ ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
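+
+/*
+ * Example: fourcc_code('X', 'R', '2', '4') packs 'X' (0x58) into bits 7:0
+ * through '4' (0x34) into bits 31:24, giving 0x34325258, the value of
+ * DRM_FORMAT_XRGB8888 below.
+ */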
+
+#define DRM_FORMAT_BIG_ENDIAN (1UL<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
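+
+/*
+ * Example: a width x height NV12 image occupies width * height bytes of Y
+ * in plane 0 plus width * height / 2 bytes of interleaved chroma in plane 1
+ * (2x2 subsampling halves both chroma dimensions, and the two chroma
+ * components share one plane).
+ */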
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
+#define DRM_FORMAT_MOD_VENDOR_NV 0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
+/* add more to the end as needed */
+
+#define fourcc_mod_code(vendor, val) \
+ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
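+
+/*
+ * Example: fourcc_mod_code(INTEL, 1) puts DRM_FORMAT_MOD_VENDOR_INTEL (0x01)
+ * in bits 63:56 and the vendor-defined value 1 in bits 55:0, giving
+ * 0x0100000000000001ULL (I915_FORMAT_MOD_X_TILED below).
+ */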
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ */
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
+ * 2Kb) in row-major layout. Within the tile bytes are laid out row-major, with
+ * a platform-dependent stride. On top of that the memory can apply
+ * platform-dependent swizzling of some higher address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
+ * 2Kb) in row-major layout. Within the tile bytes are laid out in OWORD (16 bytes)
+ * chunks column-major, with a platform-dependent height. On top of that the
+ * memory can apply platform-dependent swizzling of some higher address bits
+ * into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major layout.
+ * Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
+ * are arranged in four groups (two wide, two high) with column-major layout.
+ * Each group therefore consists of four 256 byte units, which are also laid
+ * out as 2x2 column-major.
+ * 256 byte units are made out of four 64 byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the
+ * width in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
+/*
+ * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
+ *
+ * Macroblocks are laid out in a Z-shape, and the pixel data within each
+ * macroblock follows the standard NV12 layout.
+ * As for NV12, an image is the result of two frame buffers: one for Y,
+ * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
+ * Alignment requirements are (for each buffer):
+ * - multiple of 128 pixels for the width
+ * - multiple of 32 pixels for the height
+ *
+ * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+
+#endif /* DRM_FOURCC_H */
diff --git a/usr/src/uts/common/io/drm/drm_io32.h b/usr/src/uts/common/drm/drm_io32.h
index e710697..d406b81 100644
--- a/usr/src/uts/common/io/drm/drm_io32.h
+++ b/usr/src/uts/common/drm/drm_io32.h
@@ -1,27 +1,28 @@
/*
- * CDDL HEADER START
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
*
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
#ifndef _DRM_IO32_H_
@@ -81,19 +82,13 @@ typedef struct drm_stats_32 {
typedef struct drm_buf_desc_32 {
- int count; /* Number of buffers of this size */
- int size; /* Size in bytes */
- int low_mark; /* Low water mark */
- int high_mark; /* High water mark */
+ int count; /* Number of buffers of this size */
+ int size; /* Size in bytes */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
drm_buf_flag flags;
-
- /*
- * Start address of where the AGP buffers are
- * in the AGP aperture
- */
uint32_t agp_start;
-
-}drm_buf_desc_32_t;
+} drm_buf_desc_32_t;
typedef struct drm_buf_free_32 {
int count;
@@ -182,6 +177,29 @@ typedef union drm_wait_vblank_32 {
} drm_wait_vblank_32_t;
+extern int copyin32_drm_map(void *dest, void *src);
+extern int copyout32_drm_map(void *dest, void *src);
+extern int copyin32_drm_buf_desc(void *dest, void *src);
+extern int copyout32_drm_buf_desc(void *dest, void *src);
+extern int copyin32_drm_buf_free(void *dest, void *src);
+extern int copyin32_drm_buf_map(void *dest, void *src);
+extern int copyout32_drm_buf_map(void *dest, void *src);
+extern int copyin32_drm_ctx_priv_map(void *dest, void *src);
+extern int copyout32_drm_ctx_priv_map(void *dest, void *src);
+extern int copyin32_drm_ctx_res(void *dest, void *src);
+extern int copyout32_drm_ctx_res(void *dest, void *src);
+extern int copyin32_drm_unique(void *dest, void *src);
+extern int copyout32_drm_unique(void *dest, void *src);
+extern int copyin32_drm_client(void *dest, void *src);
+extern int copyout32_drm_client(void *dest, void *src);
+extern int copyout32_drm_stats(void *dest, void *src);
+extern int copyin32_drm_version(void *dest, void *src);
+extern int copyout32_drm_version(void *dest, void *src);
+extern int copyin32_drm_wait_vblank(void *dest, void *src);
+extern int copyout32_drm_wait_vblank(void *dest, void *src);
+extern int copyin32_drm_scatter_gather(void *dest, void *src);
+extern int copyout32_drm_scatter_gather(void *dest, void *src);
+
#endif /* _MULTI_DATAMODEL */
#endif /* _DRM_IO32_H_ */
diff --git a/usr/src/uts/common/drm/drm_linux.h b/usr/src/uts/common/drm/drm_linux.h
new file mode 100644
index 0000000..02baf92
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_linux.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_LINUX_H__
+#define __DRM_LINUX_H__
+
+#include <sys/types.h>
+#include <sys/byteorder.h>
+#include "drm_atomic.h"
+
+#define DRM_MEM_CACHED 0
+#define DRM_MEM_UNCACHED 1
+#define DRM_MEM_WC 2
+
+#ifndef min
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef max
+#define max(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#define clamp_int64_t(val) \
+do { \
+ val = min((int64_t)INT_MAX, val); \
+ val = max((int64_t)INT_MIN, val); \
+} while (__lintzero)
+
+#define ioremap_wc(base,size) drm_sun_ioremap((base), (size), DRM_MEM_WC)
+#define ioremap(base, size) drm_sun_ioremap((base), (size), DRM_MEM_UNCACHED)
+#define iounmap(addr) drm_sun_iounmap((addr))
+
+#define spinlock_t kmutex_t
+#define spin_lock_init(l) mutex_init((l), NULL, MUTEX_DRIVER, NULL);
+#define spin_lock(l) mutex_enter(l)
+#define spin_unlock(u) mutex_exit(u)
+#define spin_lock_irq(l) mutex_enter(l)
+#define spin_unlock_irq(u) mutex_exit(u)
+#ifdef __lint
+/*
+ * The following is to keep lint happy when it encounters the use of 'flag'.
+ * On Linux, this allows a local variable to be used to retain context,
+ * but is unused on Solaris. Rather than trying to place LINTED
+ * directives in the source, we actually consume the flag for lint here.
+ */
+#define spin_lock_irqsave(l, flag) flag = 0; mutex_enter(l)
+#define spin_unlock_irqrestore(u, flag) flag &= flag; mutex_exit(u)
+#else
+#define spin_lock_irqsave(l, flag) mutex_enter(l)
+#define spin_unlock_irqrestore(u, flag) mutex_exit(u)
+#endif
+
+#define mutex_lock(l) mutex_enter(l)
+#define mutex_unlock(u) mutex_exit(u)
+#define mutex_is_locked(l) mutex_owned(l)
+
+#define assert_spin_locked(l) ASSERT(MUTEX_HELD(l))
+
+#define kmalloc kmem_alloc
+#define kzalloc kmem_zalloc
+#define kcalloc(x, y, z) kzalloc((x)*(y), z)
+#define kfree kmem_free
+
+#define do_gettimeofday (void) uniqtime
+#define msleep_interruptible(s) DRM_UDELAY(s)
+#define timeval_to_ns(tvp) TICK_TO_NSEC(TIMEVAL_TO_TICK(tvp))
+#define ns_to_timeval(nsec, tvp) TICK_TO_TIMEVAL(NSEC_TO_TICK(nsec), tvp)
+
+#define GFP_KERNEL KM_SLEEP
+#define GFP_ATOMIC KM_SLEEP
+
+#define KHZ2PICOS(a) (1000000000UL/(a))
+
+#define udelay drv_usecwait
+#define mdelay(x) udelay((x) * 1000)
+#define msleep(x) mdelay((x))
+#define msecs_to_jiffies(x) drv_usectohz((x) * 1000)
+#define jiffies_to_msecs(x) (drv_hztousec(x) / 1000)
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_after_eq(a,b) ((long)(a) - (long)(b) >= 0)
+#define time_before_eq(a,b) time_after_eq(b,a)
+#define time_in_range(a,b,c) \
+ (time_after_eq(a,b) && \
+ time_before_eq(a,c))
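+
+/*
+ * The signed subtraction keeps these comparisons correct across counter
+ * wraparound: e.g. time_after(jiffies, timeout) still works when
+ * ddi_get_lbolt() rolls over, as long as the two values are within
+ * LONG_MAX/2 ticks of each other.
+ */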
+
+#define jiffies ddi_get_lbolt()
+
+#ifdef _BIG_ENDIAN
+#define cpu_to_le16(x) LE_16(x)
+#define le16_to_cpu(x) LE_16(x)
+#else
+#define cpu_to_le16(x) (x)
+#define le16_to_cpu(x) (x)
+#endif
+
+#define swap(a, b) \
+ do { int tmp = (a); (a) = (b); (b) = tmp; } while (__lintzero)
+
+#define abs(x) (((x) < 0) ? -(x) : (x))
+
+#define div_u64(x, y) (((unsigned long long)(x)) / ((unsigned long long)(y))) /* XXX FIXME */
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+
+#define put_user(val,ptr) DRM_COPY_TO_USER(ptr,(&val),sizeof(val))
+#define get_user(x,ptr) DRM_COPY_FROM_USER((&x),ptr,sizeof(x))
+#define copy_to_user DRM_COPY_TO_USER
+#define copy_from_user DRM_COPY_FROM_USER
+#define unlikely(a) (a)
+
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+
+#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))
+
+#define page_to_phys(x) *(uint32_t *)(uintptr_t)(x)
+#define in_dbg_master() 0
+
+#define BITS_PER_BYTE 8
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define POS_DIV_ROUND_CLOSEST(x, d) ((x + (d / 2)) / d)
+#define POS_DIV_ROUND_UP_ULL(x, d) DIV_ROUND_UP(x,d)
+
+typedef unsigned long dma_addr_t;
+typedef uint64_t u64;
+typedef int64_t s64;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef uint_t irqreturn_t;
+
+typedef int bool;
+
+#define true (1)
+#define false (0)
+
+#define __init
+#define __exit
+#define __iomem
+
+#ifdef _ILP32
+typedef u32 resource_size_t;
+#else /* _LP64 */
+typedef u64 resource_size_t;
+#endif
+
+typedef struct kref {
+ atomic_t refcount;
+} kref_t;
+
+extern void kref_init(struct kref *kref);
+extern void kref_get(struct kref *kref);
+extern void kref_put(struct kref *kref, void (*release)(struct kref *kref));
+
+extern unsigned int hweight16(unsigned int w);
+
+extern long IS_ERR(const void *ptr);
+#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))
+
+#ifdef __lint
+/*
+ * The actual code for _wait_for() causes Solaris lint2 to fail, though
+ * by all appearances, the code actually works (may try and peek at
+ * the compiled code to understand why). So to get around the problem,
+ * we create a special lint version for _wait_for().
+ */
+#define _wait_for(COND, MS, W) (! (COND))
+#else /* !__lint */
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W) udelay(W); \
+ } \
+ ret__; \
+})
+#endif /* __lint */
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
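+
+/*
+ * Usage sketch (status_ready() is a hypothetical poll function):
+ *
+ *	if (wait_for(status_ready(dev), 50))
+ *		return (-ETIMEDOUT);
+ *
+ * polls the condition for up to 50ms with a 1us pause between checks;
+ * wait_for_atomic() polls the same way but without the pause.
+ */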
+
+#endif /* __DRM_LINUX_H__ */
diff --git a/usr/src/uts/common/drm/drm_linux_list.h b/usr/src/uts/common/drm/drm_linux_list.h
new file mode 100644
index 0000000..756d29f
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_linux_list.h
@@ -0,0 +1,165 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * drm_linux_list.h -- linux list functions for the BSDs.
+ * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
+ */
+/*
+ * -
+ * Copyright 2003 Eric Anholt
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+#ifndef _DRM_LINUX_LIST_H_
+#define _DRM_LINUX_LIST_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+struct list_head {
+ struct list_head *next, *prev;
+ caddr_t contain_ptr;
+};
+
+/* Cheat, assume the list_head is at the start of the struct */
+#define container_of(ptr, type, member) \
+ ((type *)(uintptr_t)((char *)(ptr) - (unsigned long)(&((type *)0)->member)))
+
+#define list_entry(ptr, type, member) \
+ ((ptr) ? ((type *)(uintptr_t)((ptr)->contain_ptr)) : NULL)
+
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+#define list_empty(head) \
+ ((head)->next == (head))
+
+#define INIT_LIST_HEAD(head) \
+do { \
+ (head)->next = head; \
+ (head)->prev = head; \
+ (head)->contain_ptr = NULL; \
+} while (*"\0")
+
+#define list_add(ptr, head, entry) \
+do { \
+ struct list_head *n_node = (head)->next; \
+ (ptr)->prev = head; \
+ (ptr)->next = (head)->next; \
+ n_node->prev = ptr; \
+ (head)->next = ptr; \
+ (ptr)->contain_ptr = entry; \
+} while (*"\0")
+
+#define list_add_tail(ptr, head, entry) \
+do { \
+ struct list_head *p_node = (head)->prev; \
+ (ptr)->prev = (head)->prev; \
+ (ptr)->next = head; \
+ p_node->next = ptr; \
+ (head)->prev = ptr; \
+ (ptr)->contain_ptr = entry; \
+} while (*"\0")
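+
+/*
+ * Unlike the Linux lists these emulate, list_add()/list_add_tail() take the
+ * containing object as a third argument and stash it in contain_ptr, which
+ * list_entry() then returns directly. A sketch with a hypothetical type:
+ *
+ *	struct my_obj { int data; struct list_head link; } obj;
+ *	INIT_LIST_HEAD(&head);
+ *	list_add_tail(&obj.link, &head, (caddr_t)&obj);
+ *	struct my_obj *p = list_entry(head.next, struct my_obj, link);
+ */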
+
+#define list_del(ptr) \
+do { \
+ struct list_head *n_node = (ptr)->next; \
+ struct list_head *p_node = (ptr)->prev; \
+ n_node->prev = (ptr)->prev; \
+ p_node->next = (ptr)->next; \
+ (ptr)->prev = NULL; \
+ (ptr)->next = NULL; \
+} while (*"\0")
+
+#define list_del_init(ptr) \
+do { \
+ list_del(ptr); \
+ INIT_LIST_HEAD(ptr); \
+} while (*"\0")
+
+#define list_move(ptr, head, entry) \
+do { \
+ list_del(ptr); \
+ list_add(ptr, head, entry); \
+} while (*"\0")
+
+
+#define list_move_tail(ptr, head, entry) \
+do { \
+ list_del(ptr); \
+ list_add_tail(ptr, head, entry); \
+} while (*"\0")
+
+#define list_splice(list, be, ne) \
+do { \
+ if (!list_empty(list)) { \
+ struct list_head *first = (list)->next; \
+ struct list_head *last = (list)->prev; \
+ first->prev = be; \
+ (be)->next = first; \
+ last->next = ne; \
+ (ne)->prev = last; \
+ } \
+} while (*"\0")
+
+#define list_replace(old, new) \
+do { \
+ struct list_head *old_list = old; \
+ struct list_head *new_list = new; \
+ new_list->next = old_list->next; \
+ new_list->next->prev = new_list; \
+ new_list->prev = old_list->prev; \
+ new_list->prev->next = new_list; \
+} while (*"\0")
+
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != head; pos = (pos)->next)
+
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = (pos)->next; \
+ pos != head; \
+ pos = n, n = n->next)
+
+#define list_for_each_entry(pos, type, head, member) \
+ for (pos = list_entry((head)->next, type, member); pos; \
+ pos = list_entry(pos->member.next, type, member))
+
+#define list_for_each_entry_safe(pos, n, type, head, member) \
+ for (pos = list_entry((head)->next, type, member), \
+ n = pos ? list_entry(pos->member.next, type, member) : pos; \
+ pos; \
+ pos = n, \
+ n = list_entry((n ? n->member.next : (head)->next), type, member))
+
+#define list_for_each_entry_continue_reverse(pos, type, head, member) \
+ for (pos = list_entry(pos->member.prev, type, member); \
+ pos; \
+ pos = list_entry(pos->member.prev, type, member))
+#endif /* _DRM_LINUX_LIST_H_ */
diff --git a/usr/src/uts/common/drm/drm_mm.h b/usr/src/uts/common/drm/drm_mm.h
new file mode 100644
index 0000000..5578b41
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_mm.h
@@ -0,0 +1,242 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ */
+
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_MM_H_
+#define _DRM_MM_H_
+
+/*
+ * Generic range manager structs
+ */
+
+struct drm_mm_node {
+ struct list_head node_list;
+ struct list_head hole_stack;
+ unsigned hole_follows;
+ unsigned scanned_block;
+ unsigned scanned_prev_free;
+ unsigned scanned_next_free;
+ unsigned scanned_preceeds_hole;
+ unsigned allocated;
+ unsigned long color;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+};
+
+struct drm_mm {
+ /* List of free memory blocks, most recently freed first. */
+ struct list_head hole_stack;
+ /* head_node.node_list is the list of all memory nodes, ordered
+ * according to the (increasing) start address of the memory node. */
+ struct drm_mm_node head_node;
+ struct list_head unused_nodes;
+ int num_unused;
+ spinlock_t unused_lock;
+ unsigned int scan_check_range : 1;
+ unsigned scan_alignment;
+ unsigned long scan_color;
+ unsigned long scan_size;
+ unsigned long scan_hit_start;
+ unsigned long scan_hit_end;
+ unsigned scanned_blocks;
+ unsigned long scan_start;
+ unsigned long scan_end;
+ struct drm_mm_node *prev_scanned_node;
+
+ void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
+ unsigned long *start, unsigned long *end);
+};
+
+static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+{
+ return node->allocated;
+}
+
+extern bool drm_mm_initialized(struct drm_mm *mm);
+extern unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node);
+extern unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node);
+extern unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node);
+extern unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node);
+#define drm_mm_for_each_node(entry, type, mm) list_for_each_entry(entry, type, \
+ &(mm)->head_node.node_list, \
+ node_list)
+#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
+ for (entry = (mm)->prev_scanned_node, \
+ n = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL; \
+ entry != NULL; entry = n, \
+ n = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL)
+
+/* Note that we need to unroll list_for_each_entry in order to inline
+ * setting hole_start and hole_end on each iteration and keep the
+ * macro sane.
+ */
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+ for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+ (entry) && (&entry->hole_stack != &(mm)->hole_stack ? \
+ hole_start = drm_mm_hole_node_start(entry), \
+ hole_end = drm_mm_hole_node_end(entry), \
+ 1 : 0); \
+ entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
+/*
+ * Basic range manager support (drm_mm.c)
+ */
+extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size,
+ bool atomic);
+extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ int atomic);
+extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment);
+
+extern struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment);
+
+extern struct drm_mm_node *drm_mm_get_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+
+extern struct drm_mm_node *drm_mm_get_block_atomic_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+
+extern int drm_mm_insert_node(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment);
+extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color);
+extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end);
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ bool best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ bool best_match);
+extern void drm_mm_init(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern int drm_mm_pre_get(struct drm_mm *mm);
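+
+/*
+ * Typical lifecycle sketch (range_size, obj_size and alignment are
+ * hypothetical values):
+ *
+ *	drm_mm_init(&mm, 0, range_size);
+ *	drm_mm_insert_node(&mm, &node, obj_size, alignment);
+ *	...
+ *	drm_mm_remove_node(&node);
+ *	drm_mm_takedown(&mm);
+ *
+ * drm_mm_clean() reports whether any nodes are still allocated, and
+ * drm_mm_pre_get() pre-allocates spare nodes for atomic callers.
+ */
+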
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+{
+ return block->mm;
+}
+
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color);
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end);
+int drm_mm_scan_add_block(struct drm_mm_node *node);
+int drm_mm_scan_remove_block(struct drm_mm_node *node);
+
+extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+#ifdef CONFIG_DEBUG_FS
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
+#endif
+
+#endif /* _DRM_MM_H_ */
diff --git a/usr/src/uts/common/drm/drm_mode.h b/usr/src/uts/common/drm/drm_mode.h
new file mode 100644
index 0000000..cdaff66
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_mode.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_MODE_H
+#define _DRM_MODE_H
+
+#define DRM_DISPLAY_INFO_LEN 32
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+/* Video mode flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_FLAG_PHSYNC (1<<0)
+#define DRM_MODE_FLAG_NHSYNC (1<<1)
+#define DRM_MODE_FLAG_PVSYNC (1<<2)
+#define DRM_MODE_FLAG_NVSYNC (1<<3)
+#define DRM_MODE_FLAG_INTERLACE (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN (1<<5)
+#define DRM_MODE_FLAG_CSYNC (1<<6)
+#define DRM_MODE_FLAG_PCSYNC (1<<7)
+#define DRM_MODE_FLAG_NCSYNC (1<<8)
+#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST (1<<10)
+#define DRM_MODE_FLAG_PIXMUX (1<<11)
+#define DRM_MODE_FLAG_DBLCLK (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
+#define DRM_MODE_FLAG_3D_NONE (0<<14)
+#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
+#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
+#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
+#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+
+
+/* DPMS flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_DPMS_ON 0
+#define DRM_MODE_DPMS_STANDBY 1
+#define DRM_MODE_DPMS_SUSPEND 2
+#define DRM_MODE_DPMS_OFF 3
+
+/* Scaling mode options */
+#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
+ software can still scale) */
+#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
+#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
+#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
+
+/* Dithering mode options */
+#define DRM_MODE_DITHERING_OFF 0
+#define DRM_MODE_DITHERING_ON 1
+#define DRM_MODE_DITHERING_AUTO 2
+
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
+struct drm_mode_modeinfo {
+ __u32 clock;
+ __u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
+ __u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+ __u32 vrefresh;
+
+ __u32 flags;
+ __u32 type;
+ char name[DRM_DISPLAY_MODE_LEN];
+};
+
+struct drm_mode_card_res {
+ __u64 fb_id_ptr;
+ __u64 crtc_id_ptr;
+ __u64 connector_id_ptr;
+ __u64 encoder_id_ptr;
+ __u32 count_fbs;
+ __u32 count_crtcs;
+ __u32 count_connectors;
+ __u32 count_encoders;
+ __u32 min_width, max_width;
+ __u32 min_height, max_height;
+};
+
+struct drm_mode_crtc {
+ __u64 set_connectors_ptr;
+ __u32 count_connectors;
+
+ __u32 crtc_id; /**< Id */
+ __u32 fb_id; /**< Id of framebuffer */
+
+ __u32 x, y; /**< Position on the framebuffer */
+
+ __u32 gamma_size;
+ __u32 mode_valid;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+ __u32 plane_id;
+ __u32 crtc_id;
+ __u32 fb_id; /* fb object contains surface format type */
+ __u32 flags;
+
+ /* Signed dest location allows it to be partially off screen */
+ __s32 crtc_x, crtc_y;
+ __u32 crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ __u32 src_x, src_y;
+ __u32 src_h, src_w;
+};
+
+struct drm_mode_get_plane {
+ __u32 plane_id;
+
+ __u32 crtc_id;
+ __u32 fb_id;
+
+ __u32 possible_crtcs;
+ __u32 gamma_size;
+
+ __u32 count_format_types;
+ __u64 format_type_ptr;
+};
+
+struct drm_mode_get_plane_res {
+ __u64 plane_id_ptr;
+ __u32 count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI 6
+#define DRM_MODE_ENCODER_DPMST 7
+
+struct drm_mode_get_encoder {
+ __u32 encoder_id;
+ __u32 encoder_type;
+
+ __u32 crtc_id; /**< Id of crtc */
+
+ __u32 possible_crtcs;
+ __u32 possible_clones;
+};
+
+/* This is for connectors with multiple signal types. */
+/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
+#define DRM_MODE_SUBCONNECTOR_Automatic 0
+#define DRM_MODE_SUBCONNECTOR_Unknown 0
+#define DRM_MODE_SUBCONNECTOR_DVID 3
+#define DRM_MODE_SUBCONNECTOR_DVIA 4
+#define DRM_MODE_SUBCONNECTOR_Composite 5
+#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
+#define DRM_MODE_SUBCONNECTOR_Component 8
+#define DRM_MODE_SUBCONNECTOR_SCART 9
+
+#define DRM_MODE_CONNECTOR_Unknown 0
+#define DRM_MODE_CONNECTOR_VGA 1
+#define DRM_MODE_CONNECTOR_DVII 2
+#define DRM_MODE_CONNECTOR_DVID 3
+#define DRM_MODE_CONNECTOR_DVIA 4
+#define DRM_MODE_CONNECTOR_Composite 5
+#define DRM_MODE_CONNECTOR_SVIDEO 6
+#define DRM_MODE_CONNECTOR_LVDS 7
+#define DRM_MODE_CONNECTOR_Component 8
+#define DRM_MODE_CONNECTOR_9PinDIN 9
+#define DRM_MODE_CONNECTOR_DisplayPort 10
+#define DRM_MODE_CONNECTOR_HDMIA 11
+#define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
+#define DRM_MODE_CONNECTOR_VIRTUAL 15
+#define DRM_MODE_CONNECTOR_DSI 16
+
+struct drm_mode_get_connector {
+
+ __u64 encoders_ptr;
+ __u64 modes_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+
+ __u32 count_modes;
+ __u32 count_props;
+ __u32 count_encoders;
+
+ __u32 encoder_id; /**< Current Encoder */
+ __u32 connector_id; /**< Id */
+ __u32 connector_type;
+ __u32 connector_type_id;
+
+ __u32 connection;
+ __u32 mm_width, mm_height; /**< width and height in millimeters */
+ __u32 subpixel;
+};
+
+#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB (1<<4)
+#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
+
+/* non-extended types: legacy bitmask, one bit per type: */
+#define DRM_MODE_PROP_LEGACY_TYPE ( \
+ DRM_MODE_PROP_RANGE | \
+ DRM_MODE_PROP_ENUM | \
+ DRM_MODE_PROP_BLOB | \
+ DRM_MODE_PROP_BITMASK)
+
+/* extended-types: rather than continue to consume a bit per type,
+ * grab a chunk of the bits to use as integer type id.
+ */
+#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
+#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
+#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
+#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
+
+struct drm_mode_property_enum {
+ __u64 value;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_mode_get_property {
+ __u64 values_ptr; /* values and blob lengths */
+ __u64 enum_blob_ptr; /* enum and blob id ptrs */
+
+ __u32 prop_id;
+ __u32 flags;
+ char name[DRM_PROP_NAME_LEN];
+
+ __u32 count_values;
+ __u32 count_enum_blobs;
+};
+
+struct drm_mode_connector_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 connector_id;
+};
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+
+struct drm_mode_obj_get_properties {
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u32 count_props;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_obj_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_get_blob {
+ __u32 blob_id;
+ __u32 length;
+ __u64 data;
+};
+
+struct drm_mode_fb_cmd {
+ __u32 fb_id;
+ __u32 width, height;
+ __u32 pitch;
+ __u32 bpp;
+ __u32 depth;
+ /* driver specific handle */
+ __u32 handle;
+};
+
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
+
+struct drm_mode_fb_cmd2 {
+ __u32 fb_id;
+ __u32 width, height;
+ __u32 pixel_format; /* fourcc code from drm_fourcc.h */
+ __u32 flags;
+
+ /*
+ * In case of planar formats, this ioctl allows up to 4
+ * buffer objects with offsets and pitches per plane.
+ * The pitch and offset order is dictated by the fourcc,
+ * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8 bit Y samples
+ * followed by an interleaved U/V plane containing
+ * 8 bit 2x2 subsampled colour difference samples.
+ *
+ * So it would consist of Y as offset[0] and UV as
+ * offset[1]. Note that offset[0] will generally
+ * be 0.
+ *
+ * To accommodate tiled, compressed, etc. formats, a per-plane
+ * vendor-specific modifier token can be specified. The default
+ * value of zero indicates the "native" format as specified by
+ * the fourcc. This allows, for example, different tiling/swizzling
+ * patterns on different planes.
+ * See the discussion of DRM_FORMAT_MOD_xxx in drm_fourcc.h.
+ */
+ __u32 handles[4];
+ __u32 pitches[4]; /* pitch for each plane */
+ __u32 offsets[4]; /* offset of each plane */
+ __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
+};
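+
+/*
+ * Example: an NV12 framebuffer stored in one buffer object (bo, pitch and
+ * height are hypothetical values) could be described as:
+ *
+ *	handles[0] = bo; pitches[0] = pitch; offsets[0] = 0;
+ *	handles[1] = bo; pitches[1] = pitch; offsets[1] = pitch * height;
+ */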
+
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * when hardware or software draws to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * regions specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; the annotations are a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
+ * the number of updated regions is half of the num_clips given,
+ * where the clip rects are paired as src and dst. The width and
+ * height of each one of the pairs must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
+ * promises that the regions specified by the clip rects are filled
+ * completely with a single color as given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+ __u32 fb_id;
+ __u32 flags;
+ __u32 color;
+ __u32 num_clips;
+ __u64 clips_ptr;
+};
+
+struct drm_mode_mode_cmd {
+ __u32 connector_id;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO (1<<0)
+#define DRM_MODE_CURSOR_MOVE (1<<1)
+#define DRM_MODE_CURSOR_FLAGS (DRM_MODE_CURSOR_BO|DRM_MODE_CURSOR_MOVE)
+
+/*
+ * depending on the value in flags different members are used.
+ *
+ * CURSOR_BO uses
+ * crtc
+ * width
+ * height
+ * handle - if 0 turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ * crtc
+ * x
+ * y
+ */
+struct drm_mode_cursor {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+};
+
+struct drm_mode_cursor2 {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+ __s32 hot_x;
+ __s32 hot_y;
+};
+
+struct drm_mode_crtc_lut {
+ __u32 crtc_id;
+ __u32 gamma_size;
+
+ /* pointers to arrays */
+ __u64 red;
+ __u64 green;
+ __u64 blue;
+};
+
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
+ * request that drm send back a vblank event (see drm.h: struct
+ * drm_event_vblank) when the page flip is done. The user_data field
+ * passed in with this ioctl will be returned as the user_data field
+ * in the vblank event struct.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 reserved;
+ __u64 user_data;
+};
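+
+/*
+ * Usage sketch: set crtc_id and fb_id, flags = DRM_MODE_PAGE_FLIP_EVENT,
+ * reserved = 0 and a caller-chosen user_data cookie, then read the matching
+ * struct drm_event_vblank from the drm fd once the flip completes.
+ */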
+
+/* create a dumb scanout buffer */
+struct drm_mode_create_dumb {
+ __u32 height;
+ __u32 width;
+ __u32 bpp;
+ __u32 flags;
+ /* handle, pitch, size will be returned */
+ __u32 handle;
+ __u32 pitch;
+ __u64 size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_mode_destroy_dumb {
+ __u32 handle;
+};
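+
+/*
+ * Dumb buffer flow: DRM_IOCTL_MODE_CREATE_DUMB fills in handle, pitch and
+ * size; DRM_IOCTL_MODE_MAP_DUMB converts the handle into a fake offset;
+ * mmap(2) at that offset maps the pixels; DRM_IOCTL_MODE_DESTROY_DUMB
+ * releases the handle.
+ */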
+
+/* page-flip flags are valid, plus: */
+#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+struct drm_mode_atomic {
+ __u32 flags;
+ __u32 count_objs;
+ __u64 objs_ptr;
+ __u64 count_props_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u64 reserved;
+ __u64 user_data;
+};
+
+/**
+ * Create a new 'blob' data property, copying length bytes from data pointer,
+ * and returning new blob ID.
+ */
+struct drm_mode_create_blob {
+ /** Pointer to data to copy. */
+ __u64 data;
+ /** Length of data to copy. */
+ __u32 length;
+ /** Return: new property ID. */
+ __u32 blob_id;
+};
+
+/**
+ * Destroy a user-created blob property.
+ */
+struct drm_mode_destroy_blob {
+ __u32 blob_id;
+};
+
+
+#endif
diff --git a/usr/src/uts/common/drm/drm_os_solaris.h b/usr/src/uts/common/drm/drm_os_solaris.h
new file mode 100644
index 0000000..7b5e63b
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_os_solaris.h
@@ -0,0 +1,119 @@
+/*
+ * file drm_os_solaris.h
+ *
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Solaris OS abstractions
+ */
+
+#ifndef _DRM_OS_SOLARIS_H
+#define _DRM_OS_SOLARIS_H
+
+/* Only Solaris builds should be here */
+#if defined(__sun)
+
+/* A couple of "hints" definitions not needed for Solaris drivers */
+#ifndef __user
+#define __user
+#endif
+#ifndef __force
+#define __force
+#endif
+#ifndef __must_check
+#define __must_check
+#endif
+
+typedef uint64_t drm_u64_t;
+
+/*
+ * Some defines that are best put in ioccom.h, but live here till then.
+ */
+
+/* Reverse ioctl command lookup. */
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+
+/* Mask values that would otherwise not be in drm.h or ioccom.h */
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+/* Solaris uses a bit for none, but calls it IOC_VOID */
+#define _IOC_NONE 1U
+#define _IOC_WRITE 2U
+#define _IOC_READ 4U
+
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
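+
+/*
+ * Example: with the usual encoding (command number in the low _IOC_NRBITS
+ * bits), _IOC_NR(cmd) recovers the per-driver command number that
+ * DRM_IOCTL_NR() below is defined in terms of.
+ */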
+
+#if defined(__sun)
+/* Things that should be defined in drmP.h */
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#endif /* __sun */
+
+#define XFREE86_VERSION(major, minor, patch, snap) \
+	(((major) << 16) | ((minor) << 8) | (patch))
+
+#ifndef CONFIG_XFREE86_VERSION
+#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4, 1, 0, 0)
+#endif
+
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4, 1, 0, 0)
+#define DRM_PROC_DEVICES "/proc/devices"
+#define DRM_PROC_MISC "/proc/misc"
+#define DRM_PROC_DRM "/proc/drm"
+#define DRM_DEV_DRM "/dev/drm"
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+#endif /* CONFIG_XFREE86_VERSION < XFREE86_VERSION(4, 1, 0, 0) */
+
+#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4, 1, 0, 0)
+#ifdef __OpenBSD__
+#define DRM_MAJOR 81
+#endif
+#if defined(__linux__) || defined(__NetBSD__)
+#define DRM_MAJOR 226
+#endif
+#define DRM_MAX_MINOR 15
+#endif /* CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0) */
+
+
+#ifdef _KERNEL
+/* Defines that are only relevant for kernel modules and drivers */
+
+#ifdef __lint
+/* Don't lint these macros. */
+#define BUG_ON(a)
+#define WARN_ON(a)
+#else
+#define BUG_ON(a) ASSERT(!(a))
+#define WARN_ON(a) do { \
+ if (a) drm_debug_print(CE_WARN, __func__, __LINE__, #a); \
+ } while (__lintzero)
+#endif /* __lint */
+
+#define BUG() BUG_ON(1)
+
+#endif /* _KERNEL */
+#endif /* __sun */
+#endif /* _DRM_OS_SOLARIS_H */
diff --git a/usr/src/uts/intel/io/drm/drm_pciids.h b/usr/src/uts/common/drm/drm_pciids.h
index 18176ed..699cfaf 100644
--- a/usr/src/uts/intel/io/drm/drm_pciids.h
+++ b/usr/src/uts/common/drm/drm_pciids.h
@@ -1,10 +1,16 @@
+/* BEGIN CSTYLED */
+
/*
* This file is auto-generated from the drm_pciids.txt in the DRM CVS
* Please contact dri-devel@lists.sf.net to add new cards to this list
*/
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
#ifndef _DRM_PCIIDS_H_
@@ -112,6 +118,7 @@ extern "C" {
{0x1002, 0x596A, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x596B, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5b60, CHIP_RV350, "ATI Radeon RV370 X300SE"}, \
+ {0x1002, 0x5b64, CHIP_RV380, "SUN XVR-300"}, \
{0x1002, 0x5c61, CHIP_RV280|RADEON_IS_MOBILITY, \
"ATI Radeon RV280 Mobility"}, \
{0x1002, 0x5c62, CHIP_RV280, "ATI Radeon RV280"}, \
@@ -184,6 +191,30 @@ extern "C" {
{0x1002, 0x4c4e, 0, "Rage Mobility L AGP 2X"}, \
{0, 0, 0, NULL}
+#define sisdrv_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define tdfx_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define viadrv_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define i810_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define i830_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define gamma_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define savage_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define ffb_PCI_IDS \
+ {0, 0, 0, NULL}
+
#define i915_PCI_IDS\
{0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
{0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
diff --git a/usr/src/uts/common/drm/drm_rect.h b/usr/src/uts/common/drm/drm_rect.h
new file mode 100644
index 0000000..2fdedf1
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_rect.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_RECT_H
+#define DRM_RECT_H
+
+/**
+ * DOC: rect utils
+ *
+ * Utility functions to help manage rectangular areas for
+ * clipping, scaling, etc. calculations.
+ */
+
+/**
+ * struct drm_rect - two dimensional rectangle
+ * @x1: horizontal starting coordinate (inclusive)
+ * @x2: horizontal ending coordinate (exclusive)
+ * @y1: vertical starting coordinate (inclusive)
+ * @y2: vertical ending coordinate (exclusive)
+ */
+struct drm_rect {
+ int x1, y1, x2, y2;
+};
+
+/**
+ * drm_rect_adjust_size - adjust the size of the rectangle
+ * @r: rectangle to be adjusted
+ * @dw: horizontal adjustment
+ * @dh: vertical adjustment
+ *
+ * Change the size of rectangle @r by @dw in the horizontal direction,
+ * and by @dh in the vertical direction, while keeping the center
+ * of @r stationary.
+ *
+ * Positive @dw and @dh increase the size, negative values decrease it.
+ */
+static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
+{
+ r->x1 -= dw >> 1;
+ r->y1 -= dh >> 1;
+ r->x2 += (dw + 1) >> 1;
+ r->y2 += (dh + 1) >> 1;
+}
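+
+/*
+ * Worked example (illustrative): for r = {0, 0, 100, 100},
+ * drm_rect_adjust_size(&r, 10, -20) yields {-5, 10, 105, 90}: the width
+ * grows by 10, the height shrinks by 20, and the center (50, 50) stays
+ * put. The asymmetric (dw + 1) >> 1 term absorbs odd adjustments, and
+ * negative values assume the usual arithmetic right shift.
+ */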
+
+/**
+ * drm_rect_translate - translate the rectangle
+ * @r: rectangle to be translated
+ * @dx: horizontal translation
+ * @dy: vertical translation
+ *
+ * Move rectangle @r by @dx in the horizontal direction,
+ * and by @dy in the vertical direction.
+ */
+static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
+{
+ r->x1 += dx;
+ r->y1 += dy;
+ r->x2 += dx;
+ r->y2 += dy;
+}
+
+/**
+ * drm_rect_downscale - downscale a rectangle
+ * @r: rectangle to be downscaled
+ * @horz: horizontal downscale factor
+ * @vert: vertical downscale factor
+ *
+ * Divide the coordinates of rectangle @r by @horz and @vert.
+ */
+static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert)
+{
+ r->x1 /= horz;
+ r->y1 /= vert;
+ r->x2 /= horz;
+ r->y2 /= vert;
+}
+
+/**
+ * drm_rect_width - determine the rectangle width
+ * @r: rectangle whose width is returned
+ *
+ * RETURNS:
+ * The width of the rectangle.
+ */
+static inline int drm_rect_width(const struct drm_rect *r)
+{
+ return r->x2 - r->x1;
+}
+
+/**
+ * drm_rect_height - determine the rectangle height
+ * @r: rectangle whose height is returned
+ *
+ * RETURNS:
+ * The height of the rectangle.
+ */
+static inline int drm_rect_height(const struct drm_rect *r)
+{
+ return r->y2 - r->y1;
+}
+
+/**
+ * drm_rect_visible - determine if the rectangle is visible
+ * @r: rectangle whose visibility is returned
+ *
+ * RETURNS:
+ * %true if the rectangle is visible, %false otherwise.
+ */
+static inline int drm_rect_visible(const struct drm_rect *r)
+{
+ return drm_rect_width(r) > 0 && drm_rect_height(r) > 0;
+}
+
+/**
+ * drm_rect_equals - determine if two rectangles are equal
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles are equal, %false otherwise.
+ */
+static inline int drm_rect_equals(const struct drm_rect *r1,
+ const struct drm_rect *r2)
+{
+ return r1->x1 == r2->x1 && r1->x2 == r2->x2 &&
+ r1->y1 == r2->y1 && r1->y2 == r2->y2;
+}
+
+int drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
+int drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+ const struct drm_rect *clip,
+ int hscale, int vscale);
+int drm_rect_calc_hscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_hscale, int max_hscale);
+int drm_rect_calc_vscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_vscale, int max_vscale);
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_hscale, int max_hscale);
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_vscale, int max_vscale);
+void drm_rect_debug_print(const struct drm_rect *r, int fixed_point);
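+
+/*
+ * Usage sketch (illustrative): clip a rectangle against bounds with the
+ * helpers above, then test whether anything remains visible. Assumes
+ * drm_rect_intersect() shrinks its first argument to the overlap.
+ *
+ *	struct drm_rect r = { .x1 = -10, .y1 = 0, .x2 = 90, .y2 = 50 };
+ *	struct drm_rect bounds = { 0, 0, 80, 60 };
+ *
+ *	(void) drm_rect_intersect(&r, &bounds);
+ *	if (drm_rect_visible(&r))
+ *		... r is now { 0, 0, 80, 50 } ...
+ */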
+
+#endif /* DRM_RECT_H */
diff --git a/usr/src/uts/common/io/drm/drm_sarea.h b/usr/src/uts/common/drm/drm_sarea.h
index 302edcd..7325558 100644
--- a/usr/src/uts/common/io/drm/drm_sarea.h
+++ b/usr/src/uts/common/drm/drm_sarea.h
@@ -1,8 +1,8 @@
-/*
+/**
* \file drm_sarea.h
* \brief SAREA definitions
*
- * \author Michel D�zer <michel@daenzer.net>
+ * \author Michel Dänzer <michel@daenzer.net>
*/
/*
@@ -28,54 +28,55 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/* BEGIN CSTYLED */
-#ifndef _DRM_SAREA_H
-#define _DRM_SAREA_H
-#pragma ident "%Z%%M% %I% %E% SMI"
+#ifndef _DRM_SAREA_H_
+#define _DRM_SAREA_H_
#include "drm.h"
/* SAREA area needs to be at least a page */
#if defined(__alpha__)
-#define SAREA_MAX 0x2000
+#define SAREA_MAX 0x2000U
#elif defined(__ia64__)
-#define SAREA_MAX 0x10000 /* 64kB */
+#define SAREA_MAX 0x10000U /* 64kB */
#else
/* Intel 830M driver needs at least 8k SAREA */
-#define SAREA_MAX 0x2000UL
+#define SAREA_MAX 0x2000U
#endif
/** Maximum number of drawables in the SAREA */
-#define SAREA_MAX_DRAWABLES 256
+#define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
/** SAREA drawable */
-typedef struct drm_sarea_drawable {
+struct drm_sarea_drawable {
unsigned int stamp;
unsigned int flags;
-} drm_sarea_drawable_t;
+};
/** SAREA frame */
-typedef struct drm_sarea_frame {
+struct drm_sarea_frame {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
-} drm_sarea_frame_t;
+};
/** SAREA */
-typedef struct drm_sarea {
+struct drm_sarea {
/** first thing is always the DRM locking structure */
- drm_hw_lock_t lock;
+ struct drm_hw_lock lock;
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
- drm_hw_lock_t drawable_lock;
- drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
- drm_sarea_frame_t frame; /**< frame */
+ struct drm_hw_lock drawable_lock;
+ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ struct drm_sarea_frame frame; /**< frame */
drm_context_t dummy_context;
-} drm_sarea_t;
+};
+
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
-/* END CSTYLED */
-#endif /* _DRM_SAREA_H */
+#endif /* _DRM_SAREA_H_ */
diff --git a/usr/src/uts/common/drm/drm_sun_i2c.h b/usr/src/uts/common/drm/drm_sun_i2c.h
new file mode 100644
index 0000000..4482543
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_i2c.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_I2C_H__
+#define __DRM_I2C_H__
+
+#include <sys/ksynch.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/cmn_err.h>
+#include "drm.h"
+
+struct i2c_adapter;
+struct i2c_msg;
+
+struct i2c_algorithm {
+ int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
+ u32 (*functionality) (struct i2c_adapter *);
+};
+
+extern struct i2c_algorithm i2c_bit_algo;
+
+struct i2c_adapter {
+ struct i2c_algorithm *algo;
+ kmutex_t bus_lock;
+ clock_t timeout;
+ int retries;
+ char name[64];
+ void *data;
+ void (*setsda) (void *data, int state);
+ void (*setscl) (void *data, int state);
+ int (*getsda) (void *data);
+ int (*getscl) (void *data);
+ void *algo_data;
+ clock_t udelay;
+};
+
+#define I2C_M_RD 0x01
+#define I2C_M_NOSTART 0x02
+
+struct i2c_msg {
+ u16 addr;
+ u16 flags;
+ u16 len;
+ u8 *buf;
+};
+
+extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
+extern int i2c_bit_add_bus(struct i2c_adapter *adap);
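+
+/*
+ * Usage sketch (illustrative): a write-then-read transfer fetching one
+ * byte from a device at 7-bit address 0x50, as a DDC/EDID probe would.
+ * Assumes the adapter has been registered with i2c_bit_add_bus().
+ *
+ *	u8 offset = 0, val;
+ *	struct i2c_msg msgs[2] = {
+ *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
+ *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
+ *	};
+ *
+ *	if (i2c_transfer(adap, msgs, 2) == 2)
+ *		... val holds the byte read back ...
+ */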
+
+#endif /* __DRM_I2C_H__ */
diff --git a/usr/src/uts/common/drm/drm_sun_idr.h b/usr/src/uts/common/drm/drm_sun_idr.h
new file mode 100644
index 0000000..bf88b8c
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_idr.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_IDR_H__
+#define __DRM_IDR_H__
+
+#include <sys/avl.h>
+
+struct idr_used_id {
+ struct avl_node link;
+ uint32_t id;
+ void *obj;
+};
+
+struct idr_free_id {
+ struct idr_free_id *next;
+ uint32_t id;
+};
+
+struct idr_free_id_range {
+ struct idr_free_id_range *next;
+ uint32_t start;
+ uint32_t end;
+ uint32_t min_unused_id;
+ struct idr_free_id *free_ids;
+};
+
+struct idr {
+ struct avl_tree used_ids;
+ struct idr_free_id_range *free_id_ranges;
+ kmutex_t lock;
+};
+
+extern void idr_init(struct idr *idrp);
+extern int idr_get_new_above(struct idr *idrp, void *obj, int start, int *newid);
+extern void* idr_find(struct idr *idrp, uint32_t id);
+extern int idr_remove(struct idr *idrp, uint32_t id);
+extern void* idr_replace(struct idr *idrp, void *obj, uint32_t id);
+extern int idr_pre_get(struct idr *idrp, int flag);
+extern int idr_for_each(struct idr *idrp, int (*fn)(int id, void *obj, void *data), void *data);
+extern void idr_remove_all(struct idr *idrp);
+extern void idr_destroy(struct idr* idrp);
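+
+/*
+ * Usage sketch (illustrative), following the two-step Linux idr pattern
+ * these interfaces mirror; the flag argument is assumed to take a kmem
+ * KM_* allocation flag:
+ *
+ *	int id;
+ *
+ *	idr_init(&idr);
+ *	if (idr_pre_get(&idr, KM_SLEEP) == 0)
+ *		... allocation failed ...
+ *	else if (idr_get_new_above(&idr, obj, 1, &id) == 0)
+ *		... obj is now retrievable via idr_find(&idr, id) ...
+ */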
+
+#define DRM_GEM_OBJIDR_HASHNODE 1024
+
+struct idr_list {
+ struct idr_list *next, *prev;
+ void *obj;
+ uint32_t handle;
+ caddr_t contain_ptr;
+};
+
+#define idr_list_for_each(entry, head) \
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
+ list_for_each(entry, &(head)->next[key])
+
+extern int idr_list_pre_get(struct idr_list *head, int flag);
+extern void idr_list_init(struct idr_list *head);
+extern int idr_list_get_new_above(struct idr_list *head,
+ void *obj,
+ int *handlep);
+extern void *idr_list_find(struct idr_list *head, uint32_t name);
+extern int idr_list_remove(struct idr_list *head, uint32_t name);
+extern void idr_list_free(struct idr_list *head);
+extern int idr_list_empty(struct idr_list *head);
+#endif /* __DRM_IDR_H__ */
diff --git a/usr/src/uts/common/drm/drm_sun_pci.h b/usr/src/uts/common/drm/drm_sun_pci.h
new file mode 100644
index 0000000..6485df2
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_pci.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_SUN_PCI_H__
+#define __DRM_SUN_PCI_H__
+
+#include <sys/sunddi.h>
+#include "drm_linux.h"
+#define PCI_CONFIG_REGION_NUMS 6
+
+struct pci_config_region {
+ unsigned long start;
+ unsigned long size;
+};
+
+struct pci_dev {
+ struct drm_device *dev;
+ ddi_acc_handle_t pci_cfg_acc_handle;
+
+ uint16_t vendor;
+ uint16_t device;
+ struct pci_config_region regions[PCI_CONFIG_REGION_NUMS];
+ int domain;
+ int bus;
+ int slot;
+ int func;
+ int irq;
+
+ ddi_iblock_cookie_t intr_block;
+
+ int msi_enabled;
+ ddi_intr_handle_t *msi_handle;
+ int msi_size;
+ int msi_actual;
+ uint_t msi_pri;
+ int msi_flag;
+};
+
+#define pci_resource_start(pdev, bar) ((pdev)->regions[(bar)].start)
+#define pci_resource_len(pdev, bar) ((pdev)->regions[(bar)].size)
+#define pci_resource_end(pdev, bar) \
+ ((pci_resource_len((pdev), (bar)) == 0 && \
+ pci_resource_start((pdev), (bar)) == 0) ? 0 : \
+ (pci_resource_start((pdev), (bar)) + \
+ pci_resource_len((pdev), (bar)) - 1))
+
+extern uint8_t* pci_map_rom(struct pci_dev *pdev, size_t *size);
+extern void pci_unmap_rom(struct pci_dev *pdev, uint8_t *base);
+extern void pci_read_config_byte(struct pci_dev *dev, int where, u8 *val);
+extern void pci_read_config_word(struct pci_dev *dev, int where, u16 *val);
+extern void pci_read_config_dword(struct pci_dev *dev, int where, u32 *val);
+extern void pci_write_config_byte(struct pci_dev *dev, int where, u8 val);
+extern void pci_write_config_word(struct pci_dev *dev, int where, u16 val);
+extern void pci_write_config_dword(struct pci_dev *dev, int where, u32 val);
+
+extern int pci_find_capability(struct pci_dev *pdev, int capid);
+extern struct pci_dev * pci_dev_create(struct drm_device *dev);
+extern void pci_dev_destroy(struct pci_dev *pdev);
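+
+/*
+ * Usage sketch (illustrative): read the vendor/device IDs and the span of
+ * BAR 0 through the wrappers above. PCI_CONF_VENID and PCI_CONF_DEVID are
+ * the standard config-space offsets from <sys/pci.h>.
+ *
+ *	u16 vid, did;
+ *
+ *	pci_read_config_word(pdev, PCI_CONF_VENID, &vid);
+ *	pci_read_config_word(pdev, PCI_CONF_DEVID, &did);
+ *	aperture = pci_resource_len(pdev, 0);
+ */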
+
+#endif /* __DRM_SUN_PCI_H__ */
diff --git a/usr/src/uts/common/drm/drm_sun_timer.h b/usr/src/uts/common/drm/drm_sun_timer.h
new file mode 100644
index 0000000..c357805
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_timer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_TIMER_H__
+#define __DRM_TIMER_H__
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/ksynch.h>
+#include "drm_linux_list.h"
+#include "drm_linux.h"
+
+#define del_timer_sync del_timer
+
+struct timer_list {
+ struct list_head *head;
+ void (*func)(void *);
+ void *arg;
+ clock_t expires;
+ unsigned long expired_time;
+ timeout_id_t timer_id;
+ kmutex_t lock;
+};
+
+extern void init_timer(struct timer_list *timer);
+extern void destroy_timer(struct timer_list *timer);
+extern void setup_timer(struct timer_list *timer, void (*func)(void *), void *arg);
+extern void mod_timer(struct timer_list *timer, clock_t expires);
+extern void del_timer(struct timer_list *timer);
+extern void test_set_timer(struct timer_list *timer, clock_t expires);
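+
+/*
+ * Usage sketch (illustrative): arm a one-shot callout roughly one second
+ * out, then tear it down. Assumes @expires is a relative tick count;
+ * my_callback and my_arg are hypothetical.
+ *
+ *	struct timer_list t;
+ *
+ *	init_timer(&t);
+ *	setup_timer(&t, my_callback, my_arg);
+ *	mod_timer(&t, drv_usectohz(1000000));
+ *	...
+ *	del_timer_sync(&t);
+ *	destroy_timer(&t);
+ */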
+
+#endif /* __DRM_TIMER_H__ */
diff --git a/usr/src/uts/common/drm/drm_sun_workqueue.h b/usr/src/uts/common/drm/drm_sun_workqueue.h
new file mode 100644
index 0000000..a7e40eb
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_workqueue.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_SUN_WORKQUEUE_H__
+#define __DRM_SUN_WORKQUEUE_H__
+
+typedef void (*taskq_func_t)(void *);
+
+#define INIT_WORK(work, func) \
+ init_work((work), ((taskq_func_t)(func)))
+
+struct work_struct {
+ void (*func) (void *);
+};
+
+struct workqueue_struct {
+ ddi_taskq_t *taskq;
+ char *name;
+};
+
+extern int __queue_work(struct workqueue_struct *wq, struct work_struct *work);
+#define queue_work (void)__queue_work
+extern void init_work(struct work_struct *work, void (*func)(void *));
+extern struct workqueue_struct *create_workqueue(dev_info_t *dip, char *name);
+extern void destroy_workqueue(struct workqueue_struct *wq);
+extern void cancel_delayed_work(struct workqueue_struct *wq);
+extern void flush_workqueue(struct workqueue_struct *wq);
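+
+/*
+ * Usage sketch (illustrative): create a taskq-backed workqueue and
+ * dispatch one work item to it; my_handler is hypothetical.
+ *
+ *	struct workqueue_struct *wq = create_workqueue(dip, "mywq");
+ *	struct work_struct work;
+ *
+ *	INIT_WORK(&work, my_handler);
+ *	queue_work(wq, &work);
+ *	...
+ *	flush_workqueue(wq);
+ *	destroy_workqueue(wq);
+ */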
+
+#endif /* __DRM_SUN_WORKQUEUE_H__ */
diff --git a/usr/src/uts/common/drm/drm_sunmod.h b/usr/src/uts/common/drm/drm_sunmod.h
new file mode 100644
index 0000000..ec4de5e
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sunmod.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Common misc module interfaces of DRM under Solaris
+ */
+
+/*
+ * I915 DRM Driver for Solaris
+ *
+ * This driver provides the hardware 3D acceleration support for Intel
+ * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
+ * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
+ * means the kernel device driver in DRI.
+ *
+ * I915 driver is a device dependent driver only, it depends on a misc module
+ * named drm for generic DRM operations.
+ *
+ * This driver also calls into gfx and agpmaster misc modules respectively for
+ * generic graphics operations and AGP master device support.
+ */
+
+#ifndef _SYS_DRM_SUNMOD_H_
+#define _SYS_DRM_SUNMOD_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/visual_io.h>
+#include <sys/fbio.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/open.h>
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/kd.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/sunldi.h>
+#include <sys/mkdev.h>
+#include <sys/gfx_private.h>
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpmaster_io.h>
+#include "drmP.h"
+
+
+/* graphics name for the common graphics minor node */
+#define GFX_NAME "gfx"
+
+/*
+ * softstate for DRM module
+ */
+typedef struct drm_instance_state {
+ kmutex_t mis_lock;
+ kmutex_t dis_ctxlock;
+ major_t mis_major;
+ dev_info_t *mis_dip;
+ drm_device_t *mis_devp;
+ ddi_acc_handle_t mis_cfg_hdl;
+ agp_master_softc_t *mis_agpm; /* agpmaster softstate ptr */
+ gfxp_vgatext_softc_ptr_t mis_gfxp; /* gfx softstate */
+} drm_inst_state_t;
+
+
+struct drm_inst_state_list {
+ drm_inst_state_t disl_state;
+ struct drm_inst_state_list *disl_next;
+};
+typedef struct drm_inst_state_list drm_inst_list_t;
+
+extern struct list_head drm_iomem_list;
+
+static int drm_sun_open(dev_t *, int, int, cred_t *);
+static int drm_sun_close(dev_t, int, int, cred_t *);
+static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+static int drm_sun_devmap(dev_t, devmap_cookie_t,
+ offset_t, size_t, size_t *, uint_t);
+static int drm_sun_chpoll(dev_t, short, int, short *, struct pollhead **);
+static int drm_sun_read(dev_t, struct uio *, cred_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_DRM_SUNMOD_H_ */
diff --git a/usr/src/uts/intel/io/drm/i915_drm.h b/usr/src/uts/common/drm/i915_drm.h
index e6b967a..459839d 100644
--- a/usr/src/uts/intel/io/drm/i915_drm.h
+++ b/usr/src/uts/common/drm/i915_drm.h
@@ -1,10 +1,12 @@
-/* BEGIN CSTYLED */
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -12,11 +14,11 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
@@ -24,23 +26,20 @@
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
*/
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
-#ifndef _I915_DRM_H
-#define _I915_DRM_H
+#include <drm/drm.h>
+/* Need to make sure we have this included before going on */
+#include <drm/drm_os_solaris.h>
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
-#include "drm.h"
-
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -72,7 +71,7 @@ typedef struct _drm_i915_init {
} drm_i915_init_t;
typedef struct _drm_i915_sarea {
- drm_tex_region_t texList[I915_NR_TEX_REGIONS + 1];
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
@@ -122,29 +121,31 @@ typedef struct _drm_i915_sarea {
int pipeB_y;
int pipeB_w;
int pipeB_h;
-
int pad1;
- /* Triple buffering */
- drm_handle_t third_handle;
- int third_offset;
- int third_size;
- unsigned int third_tiled;
-
- unsigned int front_bo_handle;
- unsigned int back_bo_handle;
- unsigned int third_bo_handle;
- unsigned int depth_bo_handle;
-} drm_i915_sarea_t;
-/* Driver specific fence types and classes.
- */
+ /* fill out some space for old userspace triple buffer */
+ drm_handle_t unused_handle;
+ __u32 unused1, unused2, unused3;
+
+ /* buffer object handles for static buffers. May change
+ * over the lifetime of the client.
+ */
+ __u32 front_bo_handle;
+ __u32 back_bo_handle;
+ __u32 unused_bo_handle;
+ __u32 depth_bo_handle;
+
+} drm_i915_sarea_t;
-/* The only fence class we support */
-#define DRM_I915_FENCE_CLASS_ACCEL 0
-/* Fence type that guarantees read-write flush */
-#define DRM_I915_FENCE_TYPE_RW 2
-/* MI_FLUSH programmed just before the fence */
-#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
+/* due to userspace building against these headers we need some compat here */
+#define planeA_x pipeA_x
+#define planeA_y pipeA_y
+#define planeA_w pipeA_w
+#define planeA_h pipeA_h
+#define planeB_x pipeB_x
+#define planeB_y pipeB_y
+#define planeB_w pipeB_w
+#define planeB_h pipeB_h
/* Flags for perf_boxes
*/
@@ -173,7 +174,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
-#define DRM_I915_HWS_ADDR 0x11
+#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
@@ -192,6 +193,19 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
+#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_WAIT 0x2c
+#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
+#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
+#define DRM_I915_GEM_SET_CACHING 0x2f
+#define DRM_I915_GEM_GET_CACHING 0x30
+#define DRM_I915_REG_READ 0x31
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -199,7 +213,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
-#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
@@ -209,11 +223,15 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, drm_i915_hws_addr_t)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
-#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
@@ -227,32 +245,38 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
-
-/* Asynchronous page flipping:
- */
-typedef struct drm_i915_flip {
- /*
- * This is really talking about planes, and we could rename it
- * except for the fact that some of the duplicated i915_drm.h files
- * out there check for HAVE_I915_FLIP and so might pick up this
- * version.
- */
- int pipes;
-} drm_i915_flip_t;
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+
+#ifdef _MULTI_DATAMODEL
+#define I915_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = _copyin32, .copyout32 = _copyout32}
+#else
+#define I915_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL}
+#endif
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
-typedef struct _drm_i915_batchbuffer {
+typedef struct drm_i915_batchbuffer {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* mulitpass with multiple cliprects? */
- drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
-typedef struct _drm_i915_batchbuffer32 {
+typedef struct drm_i915_batchbuffer32 {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
@@ -270,10 +294,10 @@ typedef struct _drm_i915_cmdbuffer {
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* mulitpass with multiple cliprects? */
- drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
-typedef struct _drm_i915_cmdbuffer32 {
+typedef struct drm_i915_cmdbuffer32 {
caddr32_t buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
@@ -303,6 +327,27 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
+#define I915_PARAM_HAS_EXECBUF2 9
+#define I915_PARAM_HAS_BSD 10
+#define I915_PARAM_HAS_BLT 11
+#define I915_PARAM_HAS_RELAXED_FENCING 12
+#define I915_PARAM_HAS_COHERENT_RINGS 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS 14
+#define I915_PARAM_HAS_RELAXED_DELTA 15
+#define I915_PARAM_HAS_GEN7_SOL_RESET 16
+#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
+#define I915_PARAM_HAS_SEMAPHORES 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#define I915_PARAM_HAS_VEBOX 22
+#define I915_PARAM_HAS_SECURE_BATCHES 23
+#define I915_PARAM_HAS_PINNED_BATCHES 24
+#define I915_PARAM_HAS_EXEC_NO_RELOC 25
+#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
typedef struct drm_i915_getparam {
int param;
@@ -319,6 +364,7 @@ typedef struct drm_i915_getparam32 {
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+#define I915_SETPARAM_NUM_USED_FENCES 4
typedef struct drm_i915_setparam {
int param;
@@ -358,7 +404,7 @@ typedef struct drm_i915_mem_init_heap {
* rotate):
*/
typedef struct drm_i915_mem_destroy_heap {
- int region;
+ int region;
} drm_i915_mem_destroy_heap_t;
/* Allow X server to configure which pipes to monitor for vblank signals
@@ -374,55 +420,25 @@ typedef struct drm_i915_vblank_pipe {
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
- drm_vblank_seq_type_t seqtype;
+ enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
-#define I915_MMIO_READ 0
-#define I915_MMIO_WRITE 1
-
-#define I915_MMIO_MAY_READ 0x1
-#define I915_MMIO_MAY_WRITE 0x2
-
-#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
-#define MMIO_REGS_IA_VERTICES_COUNT 1
-#define MMIO_REGS_VS_INVOCATION_COUNT 2
-#define MMIO_REGS_GS_PRIMITIVES_COUNT 3
-#define MMIO_REGS_GS_INVOCATION_COUNT 4
-#define MMIO_REGS_CL_PRIMITIVES_COUNT 5
-#define MMIO_REGS_CL_INVOCATION_COUNT 6
-#define MMIO_REGS_PS_INVOCATION_COUNT 7
-#define MMIO_REGS_PS_DEPTH_COUNT 8
-
-typedef struct drm_i915_mmio_entry {
- unsigned int flag;
- unsigned int offset;
- unsigned int size;
-} drm_i915_mmio_entry_t;
-
-typedef struct drm_i915_mmio {
- unsigned int read_write:1;
- unsigned int reg:31;
- void __user *data;
-} drm_i915_mmio_t;
-
typedef struct drm_i915_hws_addr {
- uint64_t addr;
+ __u64 addr;
} drm_i915_hws_addr_t;
-
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
- uint64_t gtt_start;
+ __u64 gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
- uint64_t gtt_end;
-
+ __u64 gtt_end;
};
struct drm_i915_gem_create {
@@ -431,94 +447,94 @@ struct drm_i915_gem_create {
*
* The (page-aligned) allocated size for the object will be returned.
*/
- uint64_t size;
+ __u64 size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
- uint32_t handle;
- uint32_t pad;
+ __u32 handle;
+ __u32 pad;
};
struct drm_i915_gem_pread {
/** Handle for the object being read. */
- uint32_t handle;
- uint32_t pad;
+ __u32 handle;
+ __u32 pad;
/** Offset into the object to read from */
- uint64_t offset;
+ __u64 offset;
/** Length of data to read */
- uint64_t size;
+ __u64 size;
/**
* Pointer to write the data into.
*
* This is a fixed-size type for 32/64 compatibility.
*/
- uint64_t data_ptr;
+ __u64 data_ptr;
};
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
- uint32_t handle;
- uint32_t pad;
+ __u32 handle;
+ __u32 pad;
/** Offset into the object to write to */
- uint64_t offset;
+ __u64 offset;
/** Length of data to write */
- uint64_t size;
+ __u64 size;
/**
* Pointer to read the data from.
*
* This is a fixed-size type for 32/64 compatibility.
*/
- uint64_t data_ptr;
+ __u64 data_ptr;
};
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
- uint32_t handle;
- uint32_t pad;
+ __u32 handle;
+ __u32 pad;
/** Offset in the object to map. */
- uint64_t offset;
+ __u64 offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
- uint64_t size;
+ __u64 size;
/**
* Returned pointer the data was mapped at.
*
* This is a fixed-size type for 32/64 compatibility.
*/
- uint64_t addr_ptr;
+ __u64 addr_ptr;
};
struct drm_i915_gem_mmap_gtt {
/** Handle for the object being mapped. */
- uint32_t handle;
- uint32_t pad;
+ __u32 handle;
+ __u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
- uint64_t offset;
+ __u64 offset;
};
struct drm_i915_gem_set_domain {
/** Handle for the object */
- uint32_t handle;
+ __u32 handle;
/** New read domains */
- uint32_t read_domains;
+ __u32 read_domains;
/** New write domain */
- uint32_t write_domain;
+ __u32 write_domain;
};
struct drm_i915_gem_sw_finish {
/** Handle for the object */
- uint32_t handle;
+ __u32 handle;
};
struct drm_i915_gem_relocation_entry {
@@ -530,16 +546,16 @@ struct drm_i915_gem_relocation_entry {
* a relocation list for state buffers and not re-write it per
* exec using the buffer.
*/
- uint32_t target_handle;
+ __u32 target_handle;
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
- uint32_t delta;
+ __u32 delta;
/** Offset in the buffer the relocation entry will be written into */
- uint64_t offset;
+ __u64 offset;
/**
* Offset value of the target buffer that the relocation entry was last
@@ -549,12 +565,12 @@ struct drm_i915_gem_relocation_entry {
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
- uint64_t presumed_offset;
+ __u64 presumed_offset;
/**
* Target memory domains read by this operation.
*/
- uint32_t read_domains;
+ __u32 read_domains;
/**
* Target memory domains written by this operation.
@@ -563,7 +579,7 @@ struct drm_i915_gem_relocation_entry {
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
- uint32_t write_domain;
+ __u32 write_domain;
};
/** @{
@@ -594,26 +610,24 @@ struct drm_i915_gem_exec_object {
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
- uint32_t handle;
+ __u32 handle;
/** Number of relocations to be performed on this buffer */
- uint32_t relocation_count;
-
+ __u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
- uint64_t relocs_ptr;
+ __u64 relocs_ptr;
/** Required alignment in graphics aperture */
- uint64_t alignment;
+ __u64 alignment;
/**
- * Returned value of the updated offset of the object, for future
- * presumed_offset writes.
- */
- uint64_t offset;
-
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ __u64 offset;
};
struct drm_i915_gem_execbuffer {
@@ -627,45 +641,173 @@ struct drm_i915_gem_execbuffer {
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
- uint64_t buffers_ptr;
- uint32_t buffer_count;
+ __u64 buffers_ptr;
+ __u32 buffer_count;
/** Offset in the batchbuffer to start execution from. */
- uint32_t batch_start_offset;
+ __u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
- uint32_t batch_len;
- uint32_t DR1;
- uint32_t DR4;
- uint32_t num_cliprects;
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
- uint64_t cliprects_ptr;
+ __u64 cliprects_ptr;
};
+struct drm_i915_gem_exec_object2 {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ __u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT (1<<1)
+#define EXEC_OBJECT_WRITE (1<<2)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
+ __u64 flags;
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
+struct drm_i915_gem_execbuffer2 {
+ /**
+ * List of gem_exec_object2 structs
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_DEFAULT (0<<0)
+#define I915_EXEC_RENDER (1<<0)
+#define I915_EXEC_BSD (2<<0)
+#define I915_EXEC_BLT (3<<0)
+#define I915_EXEC_VEBOX (4<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
+ __u64 flags; /* currently unused */
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE (1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED (1<<10)
+
+/** Provide a hint to the kernel that the command stream and auxiliary
+ * state buffers already hold the correct presumed addresses and so the
+ * relocation process may be skipped if no buffers need to be moved in
+ * preparation for the execbuffer.
+ */
+#define I915_EXEC_NO_RELOC (1<<11)
+
+/** Use the reloc.handle as an index into the exec object array rather
+ * than as the per-file handle.
+ */
+#define I915_EXEC_HANDLE_LUT (1<<12)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
+
+#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
+#define i915_execbuffer2_set_context_id(eb2, context) \
+ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+#define i915_execbuffer2_get_context_id(eb2) \
+ ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
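+
+/*
+ * Example (illustrative): rsvd1 doubles as the context id, so a
+ * submission against a context created with
+ * DRM_IOCTL_I915_GEM_CONTEXT_CREATE looks like:
+ *
+ *	struct drm_i915_gem_execbuffer2 eb2;
+ *	i915_execbuffer2_set_context_id(eb2, ctx_id);
+ */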
+
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
- uint32_t handle;
- uint32_t pad;
+ __u32 handle;
+ __u32 pad;
/** alignment required within the aperture */
- uint64_t alignment;
+ __u64 alignment;
/** Returned GTT offset of the buffer. */
- uint64_t offset;
+ __u64 offset;
};
-
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
- uint32_t handle;
- uint32_t pad;
+ __u32 handle;
+ __u32 pad;
};
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
- uint32_t handle;
+ __u32 handle;
- /** Return busy status (1 if busy, 0 if idle) */
- uint32_t busy;
+ /** Return busy status (1 if busy, 0 if idle).
+ * The high word is used to indicate on which rings the object
+ * currently resides:
+ * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+ */
+ __u32 busy;
+};
+
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+
+struct drm_i915_gem_caching {
+ /**
+ * Handle of the buffer to set/get the caching level of. */
+ __u32 handle;
+
+ /**
+ * Caching level to apply or return value
+ *
+ * bits0-15 are for generic caching control (i.e. the above defined
+ * values). bits16-31 are reserved for platform-specific variations
+ * (e.g. l3$ caching on gen7). */
+ __u32 caching;
};
#define I915_TILING_NONE 0
@@ -679,10 +821,13 @@ struct drm_i915_gem_busy {
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17 6
+#define I915_BIT_6_SWIZZLE_9_10_17 7
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
- uint32_t handle;
+ __u32 handle;
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
@@ -696,47 +841,193 @@ struct drm_i915_gem_set_tiling {
*
* Buffer contents become undefined when changing tiling_mode.
*/
- uint32_t tiling_mode;
+ __u32 tiling_mode;
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
- uint32_t stride;
+ __u32 stride;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
- uint32_t swizzle_mode;
+ __u32 swizzle_mode;
};
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
- uint32_t handle;
+ __u32 handle;
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
- uint32_t tiling_mode;
+ __u32 tiling_mode;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
- uint32_t swizzle_mode;
+ __u32 swizzle_mode;
};
struct drm_i915_gem_get_aperture {
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
- uint64_t aper_size;
+ __u64 aper_size;
/**
* Available space in the aperture used by i915_gem_execbuffer, in
* bytes
*/
- uint64_t aper_available_size;
+ __u64 aper_available_size;
+};
+
+struct drm_i915_get_pipe_from_crtc_id {
+ /** ID of CRTC being requested */
+ __u32 crtc_id;
+
+ /** pipe of requested CRTC */
+ __u32 pipe;
+};
+
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+#define __I915_MADV_PURGED 2 /* internal state */
+
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice */
+ __u32 handle;
+
+ /* Advice: either the buffer will be needed again in the near future,
+ * or won't be and could be discarded under memory pressure.
+ */
+ __u32 madv;
+
+ /** Whether the backing store still exists. */
+ __u32 retained;
+};
+
+/* flags */
+#define I915_OVERLAY_TYPE_MASK 0xff
+#define I915_OVERLAY_YUV_PLANAR 0x01
+#define I915_OVERLAY_YUV_PACKED 0x02
+#define I915_OVERLAY_RGB 0x03
+
+#define I915_OVERLAY_DEPTH_MASK 0xff00
+#define I915_OVERLAY_RGB24 0x1000
+#define I915_OVERLAY_RGB16 0x2000
+#define I915_OVERLAY_RGB15 0x3000
+#define I915_OVERLAY_YUV422 0x0100
+#define I915_OVERLAY_YUV411 0x0200
+#define I915_OVERLAY_YUV420 0x0300
+#define I915_OVERLAY_YUV410 0x0400
+
+#define I915_OVERLAY_SWAP_MASK 0xff0000
+#define I915_OVERLAY_NO_SWAP 0x000000
+#define I915_OVERLAY_UV_SWAP 0x010000
+#define I915_OVERLAY_Y_SWAP 0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
+
+#define I915_OVERLAY_FLAGS_MASK 0xff000000
+#define I915_OVERLAY_ENABLE 0x01000000
+
+struct drm_intel_overlay_put_image {
+ /* various flags and src format description */
+ __u32 flags;
+ /* source picture description */
+ __u32 bo_handle;
+ /* stride values and offsets are in bytes, buffer relative */
+ __u16 stride_Y; /* stride for packed formats */
+ __u16 stride_UV;
+ __u32 offset_Y; /* offset for packed formats */
+ __u32 offset_U;
+ __u32 offset_V;
+ /* in pixels */
+ __u16 src_width;
+ __u16 src_height;
+ /* to compensate the scaling factors for partially covered surfaces */
+ __u16 src_scan_width;
+ __u16 src_scan_height;
+ /* output crtc description */
+ __u32 crtc_id;
+ __u16 dst_x;
+ __u16 dst_y;
+ __u16 dst_width;
+ __u16 dst_height;
};
-#endif /* _I915_DRM_H */
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+struct drm_intel_overlay_attrs {
+ __u32 flags;
+ __u32 color_key;
+ __s32 brightness;
+ __u32 contrast;
+ __u32 saturation;
+ __u32 gamma0;
+ __u32 gamma1;
+ __u32 gamma2;
+ __u32 gamma3;
+ __u32 gamma4;
+ __u32 gamma5;
+};
+
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple. Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent. All other pixels will
+ * be displayed on top of the primary plane. For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_DESTINATION (1<<1)
+#define I915_SET_COLORKEY_SOURCE (1<<2)
+struct drm_intel_sprite_colorkey {
+ __u32 plane_id;
+ __u32 min_value;
+ __u32 channel_mask;
+ __u32 max_value;
+ __u32 flags;
+};
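+
+/*
+ * Example (illustrative): source-key pure black on a sprite plane so
+ * that black sprite pixels show the primary plane through; for RGB
+ * surfaces only min_value and channel_mask are consulted (see above).
+ * Passed to the kernel via DRM_IOCTL_I915_SET_SPRITE_COLORKEY.
+ *
+ *	struct drm_intel_sprite_colorkey key = {
+ *		.plane_id = plane,
+ *		.min_value = 0x000000,
+ *		.channel_mask = 0xffffff,
+ *		.max_value = 0x000000,
+ *		.flags = I915_SET_COLORKEY_SOURCE,
+ *	};
+ */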
+
+struct drm_i915_gem_wait {
+ /** Handle of BO we shall wait on */
+ __u32 bo_handle;
+ __u32 flags;
+ /** Number of nanoseconds to wait. Returns time remaining. */
+ __s64 timeout_ns;
+};
+
+struct drm_i915_gem_context_create {
+ /* output: id of new context */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct drm_i915_gem_context_destroy {
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct drm_i915_reg_read {
+ __u64 offset;
+ __u64 val; /* Return value */
+};
+#endif /* _I915_DRM_H_ */
diff --git a/usr/src/uts/common/io/drm/LICENSE_DRM b/usr/src/uts/common/io/drm/LICENSE_DRM
new file mode 100644
index 0000000..2e6277e
--- /dev/null
+++ b/usr/src/uts/common/io/drm/LICENSE_DRM
@@ -0,0 +1,1349 @@
+
+File: ati_pcigart.c
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_agpsupport.c
+
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_atomic.h
+
+/*
+ * Copyright 2004 Eric Anholt
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_auth.c
+
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_bufs.c
+
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_cache.c
+
+/*
+ * Copyright(c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files(the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice(including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_context.c
+
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_core.h
+
+/*
+ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_crtc_helper.c
+
+/*
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_crtc_helper.h
+
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_crtc.c
+
+/*
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_crtc.h
+
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_dma.c
+
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_dp_helper.c
+
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_dp_helper.h
+
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_dp_i2c_helper.c
+
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_drv.c
+
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_edid.c
+
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_edid.h
+
+/*
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_fb_helper.c
+
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_fb_helper.h
+
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008, 2012, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_fops.c
+
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_fourcc.h
+
+/*
+ * Copyright 2011, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_gem.c
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_ioctl.c
+
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_irq.c
+
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_linux_list.h
+
+/*
+ * -
+ * Copyright 2003 Eric Anholt
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_lock.c
+
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_memory.c
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_mm.c
+
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+-------------------------------------------------------------------------
+
+File: drm_mm.h
+
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+-------------------------------------------------------------------------
+
+File: drm_mode.h
+
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_modes.c
+
+/*
+ *
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_pci.c
+
+/*-
+ * Copyright 2003 Eric Anholt.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_rect.c
+
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_rect.h
+
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_sarea.h
+
+/*
+ * \file drm_sarea.h
+ * \brief SAREA definitions
+ *
+ * \author Michel Dänzer <michel@daenzer.net>
+ */
+
+/*
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_scatter.c
+
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_stub.c
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm.h
+
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drmP.h
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
diff --git a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip b/usr/src/uts/common/io/drm/LICENSE_DRM.descrip
index 70fef96..70fef96 100644
--- a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip
+++ b/usr/src/uts/common/io/drm/LICENSE_DRM.descrip
diff --git a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE b/usr/src/uts/common/io/drm/THIRDPARTYLICENSE
deleted file mode 100644
index 30228d9..0000000
--- a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE
+++ /dev/null
@@ -1,314 +0,0 @@
- Solaris Direct Rendering Manager kernel drivers and modules
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/common/io/drm/drm.h
-usr/src/uts/common/io/drm/drmP.h
-usr/src/uts/common/io/drm/drm_agpsupport.c
-usr/src/uts/common/io/drm/drm_auth.c
-usr/src/uts/common/io/drm/drm_fops.c
-usr/src/uts/common/io/drm/drm_ioctl.c
-usr/src/uts/common/io/drm/drm_lock.c
-usr/src/uts/common/io/drm/drm_memory.c
-
-are covered by the following copyrights/license text:
-
-/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-File:
-
-usr/src/uts/common/io/drm/drm_drawable.c
-
-is covered by the following copyrights/license text:
-
-/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/common/io/drm/drm_irq.c
-usr/src/uts/common/io/drm/drm_pci.c
-
-are covered by the following copyrights/license text:
-
-/*
- * Copyright 2003 Eric Anholt
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
- * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-File:
-
-usr/src/uts/common/io/drm/drm_sarea.h
-
-is covered by the following copyrights/license text:
-
-/*
- * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/common/io/drm/drm_scatter.c
-usr/src/uts/i86pc/io/drm/i915_drv.c
-
-are covered by the following copyrights/license text:
-
-/*-
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/common/io/drm/drm_bufs.c
-usr/src/uts/common/io/drm/drm_context.c
-usr/src/uts/common/io/drm/drm_dma.c
-usr/src/uts/common/io/drm/drm_drv.c
-
-are covered by the following copyrights/license text:
-
-/*
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-File:
-
-usr/src/uts/common/io/drm/queue.h
-
-is covered by the following copyrights/license text:
-
-/*-
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/i86pc/io/drm/i915_dma.c
-usr/src/uts/i86pc/io/drm/i915_drm.h
-usr/src/uts/i86pc/io/drm/i915_drv.h
-usr/src/uts/i86pc/io/drm/i915_irq.c
-usr/src/uts/i86pc/io/drm/i915_mem.c
-
-are covered by the following copyrights/license text:
-
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
---------------------------------------------------------------------------
-
-File:
-
-usr/src/uts/common/io/drm/drm_pciids.txt
-
-is not covered by any copyright.
-
---------------------------------------------------------------------------
-
-All other files are covered by a Sun copyright and the CDDL:
-
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
diff --git a/usr/src/uts/common/io/drm/ati_pcigart.c b/usr/src/uts/common/io/drm/ati_pcigart.c
index 4c236c1..86a5987 100644
--- a/usr/src/uts/common/io/drm/ati_pcigart.c
+++ b/usr/src/uts/common/io/drm/ati_pcigart.c
@@ -1,7 +1,7 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
*/
+
/*
* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*-
* Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
@@ -41,6 +41,23 @@
#define ATI_PCIGART_TABLE_SIZE 32768
int
+/* LINTED */
+drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+{
+ drm_dma_handle_t *dmah;
+
+ if (dev->sg == NULL) {
+ DRM_ERROR("no scatter/gather memory!\n");
+ return (0);
+ }
+ dmah = dev->sg->dmah_gart;
+ dev->sg->dmah_gart = NULL;
+ if (dmah)
+ drm_pci_free(dmah);
+ return (1);
+}
+
+int
drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
unsigned long pages;
@@ -109,20 +126,3 @@ out:
return (1);
}
-
-/*ARGSUSED*/
-extern int
-drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
-{
- drm_dma_handle_t *dmah;
-
- if (dev->sg == NULL) {
- DRM_ERROR("no scatter/gather memory!\n");
- return (0);
- }
- dmah = dev->sg->dmah_gart;
- dev->sg->dmah_gart = NULL;
- if (dmah)
- drm_pci_free(dev, dmah);
- return (1);
-}
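
For context on the relocated routine above, a minimal usage sketch
(illustrative only; the radeon_gart_fini() wrapper name and the teardown
path are assumptions, not part of this patch):

	/*
	 * Hypothetical driver teardown path. Both GART routines return
	 * 1 on success and 0 on failure, matching the bodies shown above.
	 */
	static int
	radeon_gart_fini(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
	{
		/* Frees dev->sg->dmah_gart and clears the pointer. */
		if (!drm_ati_pcigart_cleanup(dev, gart_info))
			return (0);	/* no scatter/gather memory */
		return (1);
	}
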
diff --git a/usr/src/uts/common/io/drm/drm.h b/usr/src/uts/common/io/drm/drm.h
deleted file mode 100644
index 87af6ed..0000000
--- a/usr/src/uts/common/io/drm/drm.h
+++ /dev/null
@@ -1,865 +0,0 @@
-/* BEGIN CSTYLED */
-
-/**
- * \file drm.h
- * Header for the Direct Rendering Manager
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- *
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * \mainpage
- *
- * The Direct Rendering Manager (DRM) is a device-independent kernel-level
- * device driver that provides support for the XFree86 Direct Rendering
- * Infrastructure (DRI).
- *
- * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
- * ways:
- * -# The DRM provides synchronized access to the graphics hardware via
- * the use of an optimized two-tiered lock.
- * -# The DRM enforces the DRI security policy for access to the graphics
- * hardware by only allowing authenticated X11 clients access to
- * restricted regions of memory.
- * -# The DRM provides a generic DMA engine, complete with multiple
- * queues and the ability to detect the need for an OpenGL context
- * switch.
- * -# The DRM is extensible via the use of small device-specific modules
- * that rely extensively on the API exported by the DRM module.
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _DRM_H_
-#define _DRM_H_
-
-#include <sys/types32.h>
-
-#ifndef __user
-#define __user
-#endif
-
-#ifdef __GNUC__
-# define DEPRECATED __attribute__ ((deprecated))
-#else
-# define DEPRECATED
-# define __volatile__ volatile
-#endif
-
-#if defined(__linux__)
-#include <asm/ioctl.h> /* For _IO* macros */
-#define DRM_IOCTL_NR(n) _IOC_NR(n)
-#define DRM_IOC_VOID _IOC_NONE
-#define DRM_IOC_READ _IOC_READ
-#define DRM_IOC_WRITE _IOC_WRITE
-#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
-#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(IN_MODULE)
-/* Prevent name collision when including sys/ioccom.h */
-#undef ioctl
-#include <sys/ioccom.h>
-#define ioctl(a,b,c) xf86ioctl(a,b,c)
-#else
-#include <sys/ioccom.h>
-#endif /* __FreeBSD__ && xf86ioctl */
-#define DRM_IOCTL_NR(n) ((n) & 0xff)
-#define DRM_IOC_VOID IOC_VOID
-#define DRM_IOC_READ IOC_OUT
-#define DRM_IOC_WRITE IOC_IN
-#define DRM_IOC_READWRITE IOC_INOUT
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#endif
-
-/* Solaris-specific. */
-#if defined(__SOLARIS__) || defined(sun)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir, type, nr, size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used for X server compile */
-#if !defined(_KERNEL)
-#define _IO(type, nr) _IOC(_IOC_NONE, (type), (nr), 0)
-#define _IOR(type, nr, size) _IOC(_IOC_READ, (type), (nr), sizeof (size))
-#define _IOW(type, nr, size) _IOC(_IOC_WRITE, (type), (nr), sizeof (size))
-#define _IOWR(type, nr, size) _IOC(_IOC_READ|_IOC_WRITE, \
- (type), (nr), sizeof (size))
-
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-#endif /* _KERNEL */
-
-#define DRM_IOCTL_NR(n) _IOC_NR(n)
-#define DRM_IOC_VOID IOC_VOID
-#define DRM_IOC_READ IOC_OUT
-#define DRM_IOC_WRITE IOC_IN
-#define DRM_IOC_READWRITE IOC_INOUT
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-
-#endif /* __Solaris__ or sun */
-#define XFREE86_VERSION(major,minor,patch,snap) \
- ((major << 16) | (minor << 8) | patch)
-
-#ifndef CONFIG_XFREE86_VERSION
-#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
-#endif
-
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
-#define DRM_PROC_DEVICES "/proc/devices"
-#define DRM_PROC_MISC "/proc/misc"
-#define DRM_PROC_DRM "/proc/drm"
-#define DRM_DEV_DRM "/dev/drm"
-#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-#define DRM_DEV_UID 0
-#define DRM_DEV_GID 0
-#endif
-
-#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
-#ifdef __OpenBSD__
-#define DRM_MAJOR 81
-#endif
-#if defined(__linux__) || defined(__NetBSD__)
-#define DRM_MAJOR 226
-#endif
-#define DRM_MAX_MINOR 15
-#endif
-#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
-#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
-#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
-#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
-
-#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
-#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
-#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
-#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
-#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
-
-#if defined(__linux__)
-#if defined(__KERNEL__)
-typedef __u64 drm_u64_t;
-#else
-typedef unsigned long long drm_u64_t;
-#endif
-
-typedef unsigned int drm_handle_t;
-#else
-#include <sys/types.h>
-typedef uint64_t drm_u64_t;
-typedef unsigned long long drm_handle_t; /**< To mapped regions */
-#endif
-typedef unsigned int drm_context_t; /**< GLXContext handle */
-typedef unsigned int drm_drawable_t;
-typedef unsigned int drm_magic_t; /**< Magic for authentication */
-
-/**
- * Cliprect.
- *
- * \warning If you change this structure, make sure you change
- * XF86DRIClipRectRec in the server as well
- *
- * \note KW: Actually it's illegal to change either for
- * backwards-compatibility reasons.
- */
-typedef struct drm_clip_rect {
- unsigned short x1;
- unsigned short y1;
- unsigned short x2;
- unsigned short y2;
-} drm_clip_rect_t;
-
-/**
- * Drawable information.
- */
-typedef struct drm_drawable_info {
- unsigned int num_rects;
- drm_clip_rect_t *rects;
-} drm_drawable_info_t;
-
-/**
- * Texture region.
- */
-typedef struct drm_tex_region {
- unsigned char next;
- unsigned char prev;
- unsigned char in_use;
- unsigned char padding;
- unsigned int age;
-} drm_tex_region_t;
-
-/**
- * Hardware lock.
- *
- * The lock structure is a simple cache-line aligned integer. To avoid
- * processor bus contention on a multiprocessor system, there should not be any
- * other data stored in the same cache line.
- */
-typedef struct drm_hw_lock {
- __volatile__ unsigned int lock; /**< lock variable */
- char padding[60]; /**< Pad to cache line */
-} drm_hw_lock_t;
-
-/* This is beyond ugly, and only works on GCC. However, it allows me to use
- * drm.h in places (i.e., in the X-server) where I can't use size_t. The real
- * fix is to use uint32_t instead of size_t, but that fix will break existing
- * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems. That *will*
- * eventually happen, though. I chose 'unsigned long' to be the fallback type
- * because that works on all the platforms I know about. Hopefully, the
- * real fix will happen before that bites us.
- */
-
-#ifdef __SIZE_TYPE__
-# define DRM_SIZE_T __SIZE_TYPE__
-#else
-#if !defined(__SOLARIS__) && !defined(sun)
-# warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!"
-#endif
-# define DRM_SIZE_T unsigned long
-#endif
-
-/**
- * DRM_IOCTL_VERSION ioctl argument type.
- *
- * \sa drmGetVersion().
- */
-typedef struct drm_version {
- int version_major; /**< Major version */
- int version_minor; /**< Minor version */
- int version_patchlevel; /**< Patch level */
- DRM_SIZE_T name_len; /**< Length of name buffer */
- char __user *name; /**< Name of driver */
- DRM_SIZE_T date_len; /**< Length of date buffer */
- char __user *date; /**< User-space buffer to hold date */
- DRM_SIZE_T desc_len; /**< Length of desc buffer */
- char __user *desc; /**< User-space buffer to hold desc */
-} drm_version_t;
-
-/**
- * DRM_IOCTL_GET_UNIQUE ioctl argument type.
- *
- * \sa drmGetBusid() and drmSetBusId().
- */
-typedef struct drm_unique {
- DRM_SIZE_T unique_len; /**< Length of unique */
- char __user *unique; /**< Unique name for driver instantiation */
-} drm_unique_t;
-
-#undef DRM_SIZE_T
-
-typedef struct drm_list {
- int count; /**< Length of user-space structures */
- drm_version_t __user *version;
-} drm_list_t;
-
-typedef struct drm_block {
- int unused;
-} drm_block_t;
-
-/**
- * DRM_IOCTL_CONTROL ioctl argument type.
- *
- * \sa drmCtlInstHandler() and drmCtlUninstHandler().
- */
-typedef struct drm_control {
- enum {
- DRM_ADD_COMMAND,
- DRM_RM_COMMAND,
- DRM_INST_HANDLER,
- DRM_UNINST_HANDLER
- } func;
- int irq;
-} drm_control_t;
-
-/**
- * Type of memory to map.
- */
-typedef enum drm_map_type {
- _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
- _DRM_REGISTERS = 1, /**< no caching, no core dump */
- _DRM_SHM = 2, /**< shared, cached */
- _DRM_AGP = 3, /**< AGP/GART */
- _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
- _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
- _DRM_TTM = 6
-} drm_map_type_t;
-
-/**
- * Memory mapping flags.
- */
-typedef enum drm_map_flags {
- _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
- _DRM_READ_ONLY = 0x02,
- _DRM_LOCKED = 0x04, /**< shared, cached, locked */
- _DRM_KERNEL = 0x08, /**< kernel requires access */
- _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
- _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
- _DRM_REMOVABLE = 0x40, /**< Removable mapping */
- _DRM_DRIVER = 0x80 /**< Managed by driver */
-} drm_map_flags_t;
-
-typedef struct drm_ctx_priv_map {
- unsigned int ctx_id; /**< Context requesting private mapping */
- void *handle; /**< Handle of map */
-} drm_ctx_priv_map_t;
-
-/**
- * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
- * argument type.
- *
- * \sa drmAddMap().
- */
-typedef struct drm_map {
- unsigned long long offset; /**< Requested physical address (0 for SAREA)*/
- unsigned long long handle;
- /**< User-space: "Handle" to pass to mmap() */
- /**< Kernel-space: kernel-virtual address */
- unsigned long size; /**< Requested physical size (bytes) */
- drm_map_type_t type; /**< Type of memory to map */
- drm_map_flags_t flags; /**< Flags */
- int mtrr; /**< MTRR slot used */
- /* Private data */
-} drm_map_t;
-
-/**
- * DRM_IOCTL_GET_CLIENT ioctl argument type.
- */
-typedef struct drm_client {
- int idx; /**< Which client desired? */
- int auth; /**< Is client authenticated? */
- unsigned long pid; /**< Process ID */
- unsigned long uid; /**< User ID */
- unsigned long magic; /**< Magic */
- unsigned long iocs; /**< Ioctl count */
-} drm_client_t;
-
-typedef enum {
- _DRM_STAT_LOCK,
- _DRM_STAT_OPENS,
- _DRM_STAT_CLOSES,
- _DRM_STAT_IOCTLS,
- _DRM_STAT_LOCKS,
- _DRM_STAT_UNLOCKS,
- _DRM_STAT_VALUE, /**< Generic value */
- _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
- _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
-
- _DRM_STAT_IRQ, /**< IRQ */
- _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
- _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
- _DRM_STAT_DMA, /**< DMA */
- _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
- _DRM_STAT_MISSED /**< Missed DMA opportunity */
- /* Add to the *END* of the list */
-} drm_stat_type_t;
-
-/**
- * DRM_IOCTL_GET_STATS ioctl argument type.
- */
-typedef struct drm_stats {
- unsigned long count;
- struct {
- unsigned long value;
- drm_stat_type_t type;
- } data[15];
-} drm_stats_t;
-
-/**
- * Hardware locking flags.
- */
-typedef enum drm_lock_flags {
- _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
- _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
- _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
- _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
- /* These *HALT* flags aren't supported yet
- -- they will be used to support the
- full-screen DGA-like mode. */
- _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
- _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
-} drm_lock_flags_t;
-
-/**
- * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
- *
- * \sa drmGetLock() and drmUnlock().
- */
-typedef struct drm_lock {
- int context;
- drm_lock_flags_t flags;
-} drm_lock_t;
-
-/**
- * DMA flags
- *
- * \warning
- * These values \e must match xf86drm.h.
- *
- * \sa drm_dma.
- */
-typedef enum drm_dma_flags {
- /* Flags for DMA buffer dispatch */
- _DRM_DMA_BLOCK = 0x01, /**<
- * Block until buffer dispatched.
- *
- * \note The buffer may not yet have
- * been processed by the hardware --
- * getting a hardware lock with the
- * hardware quiescent will ensure
- * that the buffer has been
- * processed.
- */
- _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
- _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
-
- /* Flags for DMA buffer request */
- _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
- _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
- _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
-} drm_dma_flags_t;
-
-/**
- * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
- *
- * \sa drmAddBufs().
- */
-typedef enum {
- _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
- _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
- _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
- _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
- _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
-} drm_buf_flag;
-typedef struct drm_buf_desc {
- int count; /**< Number of buffers of this size */
- int size; /**< Size in bytes */
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
- drm_buf_flag flags;
- unsigned long agp_start; /**<
- * Start address of where the AGP buffers are
- * in the AGP aperture
- */
-} drm_buf_desc_t;
-
-/**
- * DRM_IOCTL_INFO_BUFS ioctl argument type.
- */
-typedef struct drm_buf_info {
- int count; /**< Number of buffers described in list */
- drm_buf_desc_t __user *list; /**< List of buffer descriptions */
-} drm_buf_info_t;
-
-/**
- * DRM_IOCTL_FREE_BUFS ioctl argument type.
- */
-typedef struct drm_buf_free {
- int count;
- int __user *list;
-} drm_buf_free_t;
-
-/**
- * Buffer information
- *
- * \sa drm_buf_map.
- */
-typedef struct drm_buf_pub {
- int idx; /**< Index into the master buffer list */
- int total; /**< Buffer size */
- int used; /**< Amount of buffer in use (for DMA) */
- void __user *address; /**< Address of buffer */
-} drm_buf_pub_t;
-
-/**
- * DRM_IOCTL_MAP_BUFS ioctl argument type.
- */
-typedef struct drm_buf_map {
- int count; /**< Length of the buffer list */
-#if defined(__cplusplus)
- void __user *c_virtual;
-#else
- void __user *virtual; /**< Mmap'd area in user-virtual */
-#endif
- drm_buf_pub_t __user *list; /**< Buffer information */
- int fd;
-} drm_buf_map_t;
-
-/**
- * DRM_IOCTL_DMA ioctl argument type.
- *
- * Indices here refer to the offset into the buffer list in drm_buf_get.
- *
- * \sa drmDMA().
- */
-typedef struct drm_dma {
- int context; /**< Context handle */
- int send_count; /**< Number of buffers to send */
- int __user *send_indices; /**< List of handles to buffers */
- int __user *send_sizes; /**< Lengths of data to send */
- drm_dma_flags_t flags; /**< Flags */
- int request_count; /**< Number of buffers requested */
- int request_size; /**< Desired size for buffers */
- int __user *request_indices; /**< Buffer information */
- int __user *request_sizes;
- int granted_count; /**< Number of buffers granted */
-} drm_dma_t;
-
-typedef enum {
- _DRM_CONTEXT_PRESERVED = 0x01,
- _DRM_CONTEXT_2DONLY = 0x02
-} drm_ctx_flags_t;
-
-/**
- * DRM_IOCTL_ADD_CTX ioctl argument type.
- *
- * \sa drmCreateContext() and drmDestroyContext().
- */
-typedef struct drm_ctx {
- drm_context_t handle;
- drm_ctx_flags_t flags;
-} drm_ctx_t;
-
-/**
- * DRM_IOCTL_RES_CTX ioctl argument type.
- */
-typedef struct drm_ctx_res {
- int count;
- drm_ctx_t __user *contexts;
-} drm_ctx_res_t;
-
-
-/**
- * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
- */
-typedef struct drm_draw {
- drm_drawable_t handle;
-} drm_draw_t;
-
-/**
- * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
- */
-typedef enum {
- DRM_DRAWABLE_CLIPRECTS,
-} drm_drawable_info_type_t;
-
-typedef struct drm_update_draw {
- drm_drawable_t handle;
- unsigned int type;
- unsigned int num;
- unsigned long long data;
-} drm_update_draw_t;
-
-/**
- * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
- */
-typedef struct drm_auth {
- drm_magic_t magic;
-} drm_auth_t;
-
-/**
- * DRM_IOCTL_IRQ_BUSID ioctl argument type.
- *
- * \sa drmGetInterruptFromBusID().
- */
-typedef struct drm_irq_busid {
- int irq; /**< IRQ number */
- int busnum; /**< bus number */
- int devnum; /**< device number */
- int funcnum; /**< function number */
-} drm_irq_busid_t;
-
-typedef enum {
- _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
- _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
- _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
- _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
- _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
- _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
-} drm_vblank_seq_type_t;
-
-#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
- _DRM_VBLANK_NEXTONMISS)
-
-struct drm_wait_vblank_request {
- drm_vblank_seq_type_t type;
- unsigned int sequence;
- unsigned long signal;
-};
-
-struct drm_wait_vblank_reply {
- drm_vblank_seq_type_t type;
- unsigned int sequence;
- long tval_sec;
- long tval_usec;
-};
-
-/**
- * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
- *
- * \sa drmWaitVBlank().
- */
-typedef union drm_wait_vblank {
- struct drm_wait_vblank_request request;
- struct drm_wait_vblank_reply reply;
-} drm_wait_vblank_t;
-
-#define _DRM_PRE_MODESET 1
-#define _DRM_POST_MODESET 2
-
-/**
- * DRM_IOCTL_MODESET_CTL ioctl argument type
- *
- * \sa drmModesetCtl().
- */
-typedef struct drm_modeset_ctl {
- uint32_t crtc;
- uint32_t cmd;
-} drm_modeset_ctl_t;
-
-/**
- * DRM_IOCTL_AGP_ENABLE ioctl argument type.
- *
- * \sa drmAgpEnable().
- */
-typedef struct drm_agp_mode {
- unsigned long mode; /**< AGP mode */
-} drm_agp_mode_t;
-
-/**
- * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
- *
- * \sa drmAgpAlloc() and drmAgpFree().
- */
-typedef struct drm_agp_buffer {
- unsigned long size; /**< In bytes -- will round to page boundary */
- unsigned long handle; /**< Used for binding / unbinding */
- unsigned long type; /**< Type of memory to allocate */
- unsigned long physical; /**< Physical used by i810 */
-} drm_agp_buffer_t;
-
-/**
- * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
- *
- * \sa drmAgpBind() and drmAgpUnbind().
- */
-typedef struct drm_agp_binding {
- unsigned long handle; /**< From drm_agp_buffer */
- unsigned long offset; /**< In bytes -- will round to page boundary */
-} drm_agp_binding_t;
-
-/**
- * DRM_IOCTL_AGP_INFO ioctl argument type.
- *
- * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
- * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
- * drmAgpVendorId() and drmAgpDeviceId().
- */
-typedef struct drm_agp_info {
- int agp_version_major;
- int agp_version_minor;
- unsigned long mode;
- unsigned long aperture_base; /**< physical address */
- unsigned long aperture_size; /**< bytes */
- unsigned long memory_allowed; /**< bytes */
- unsigned long memory_used;
-
- /** \name PCI information */
- /*@{ */
- unsigned short id_vendor;
- unsigned short id_device;
- /*@} */
-} drm_agp_info_t;
-
-/**
- * DRM_IOCTL_SG_ALLOC ioctl argument type.
- */
-typedef struct drm_scatter_gather {
- unsigned long size; /**< In bytes -- will round to page boundary */
- unsigned long handle; /**< Used for mapping / unmapping */
-} drm_scatter_gather_t;
-
-/**
- * DRM_IOCTL_SET_VERSION ioctl argument type.
- */
-typedef struct drm_set_version {
- int drm_di_major;
- int drm_di_minor;
- int drm_dd_major;
- int drm_dd_minor;
-} drm_set_version_t;
-
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
-typedef struct drm_gem_close {
- /** Handle of the object to be closed. */
- uint32_t handle;
- uint32_t pad;
-} drm_gem_close_t;
-
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
-typedef struct drm_gem_flink {
- /** Handle for the object being named */
- uint32_t handle;
-
- /** Returned global name */
- uint32_t name;
-} drm_gem_flink_t;
-
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
-typedef struct drm_gem_open {
- /** Name of object being opened */
- uint32_t name;
-
- /** Returned handle for the object */
- uint32_t handle;
-
- /** Returned size of the object */
- uint64_t size;
-} drm_gem_open_t;
-
-/**
- * \name Ioctls Definitions
- */
-/*@{*/
-
-#define DRM_IOCTL_BASE 'd'
-#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
-#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
-#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
-#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
-
-#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
-#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t)
-#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
-#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
-#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t)
-#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, drm_modeset_ctl_t)
-#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, drm_gem_close_t)
-#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, drm_gem_flink_t)
-#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, drm_gem_open_t)
-
-#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
-
-#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
-
-#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
-#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
-
-#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
-
-#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
-#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
-
-#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t)
-#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t)
-
-#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t)
-
-#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t)
-/*@}*/
-
-/**
- * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x99.
- * Generic IOCTLS restart at 0xA0.
- *
- * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
- * drmCommandReadWrite().
- */
-#define DRM_COMMAND_BASE 0x40
-#define DRM_COMMAND_END 0xA0
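
/*
 * Illustrative sketch, not from the original header: how a driver
 * defines a private ioctl in the 0x40-0x99 range described above, in
 * the style of the i915 header (the 0x06 slot and the argument type
 * name are stand-ins for illustration):
 *
 *	#define DRM_I915_GETPARAM	0x06
 *	#define DRM_IOCTL_I915_GETPARAM \
 *	    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, \
 *	    drm_i915_getparam_t)
 */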
-
-#endif /* _DRM_H_ */
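
As a sanity check on the Solaris _IOC encoding in the header removed
above, a small standalone example (illustrative only) that rebuilds a
DRM ioctl number and unpacks its direction/type/nr/size fields with the
same shift constants; the literal 8 stands in for
sizeof (drm_gem_flink_t), which is two uint32_t fields:

	#include <stdio.h>
	#include <stdint.h>

	/* Field layout from the deleted header: 8 nr bits, 8 type bits,
	 * 14 size bits, 2 direction bits, packed low to high. */
	#define NRSHIFT		0
	#define TYPESHIFT	8
	#define SIZESHIFT	16
	#define DIRSHIFT	30

	int
	main(void)
	{
		/* DRM_IOWR(0x0a, drm_gem_flink_t): dir = read|write = 3,
		 * group 'd', nr 0x0a, size 8. */
		uint32_t cmd = (3U << DIRSHIFT) | ('d' << TYPESHIFT) |
		    (0x0a << NRSHIFT) | (8 << SIZESHIFT);

		(void) printf("dir=%u type=%c nr=0x%02x size=%u\n",
		    (cmd >> DIRSHIFT) & 0x3,
		    (int)((cmd >> TYPESHIFT) & 0xff),
		    (cmd >> NRSHIFT) & 0xff,
		    (cmd >> SIZESHIFT) & 0x3fff);
		return (0);
	}
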
diff --git a/usr/src/uts/common/io/drm/drmP.h b/usr/src/uts/common/io/drm/drmP.h
deleted file mode 100644
index 16c02e5..0000000
--- a/usr/src/uts/common/io/drm/drmP.h
+++ /dev/null
@@ -1,1103 +0,0 @@
-/*
- * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
- */
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-
-/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _DRMP_H
-#define _DRMP_H
-
-#include <sys/sysmacros.h>
-#include <sys/types.h>
-#include <sys/conf.h>
-#include <sys/modctl.h>
-#include <sys/stat.h>
-#include <sys/file.h>
-#include <sys/cmn_err.h>
-#include <sys/varargs.h>
-#include <sys/pci.h>
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <sys/sunldi.h>
-#include <sys/pmem.h>
-#include <sys/agpgart.h>
-#include <sys/time.h>
-#include <sys/sysmacros.h>
-#include "drm_atomic.h"
-#include "drm.h"
-#include "queue.h"
-#include "drm_linux_list.h"
-
-#ifndef __inline__
-#define __inline__ inline
-#endif
-
-#if !defined(__FUNCTION__)
-#if defined(C99)
-#define __FUNCTION__ __func__
-#else
-#define __FUNCTION__ " "
-#endif
-#endif
-
-/* DRM space units */
-#define DRM_PAGE_SHIFT PAGESHIFT
-#define DRM_PAGE_SIZE (1 << DRM_PAGE_SHIFT)
-#define DRM_PAGE_OFFSET (DRM_PAGE_SIZE - 1)
-#define DRM_PAGE_MASK ~(DRM_PAGE_SIZE - 1)
-#define DRM_MB2PAGES(x) ((x) << 8)
-#define DRM_PAGES2BYTES(x) ((x) << DRM_PAGE_SHIFT)
-#define DRM_BYTES2PAGES(x) ((x) >> DRM_PAGE_SHIFT)
-#define DRM_PAGES2KB(x) ((x) << 2)
-#define DRM_ALIGNED(offset) (((offset) & DRM_PAGE_OFFSET) == 0)
-
-#define PAGE_SHIFT DRM_PAGE_SHIFT
-#define PAGE_SIZE DRM_PAGE_SIZE
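
/*
 * Worked example of the space-unit macros above, assuming the usual
 * x86 PAGESHIFT of 12 (4 KB pages): DRM_MB2PAGES(16) = 16 << 8 = 4096
 * pages, and DRM_PAGES2BYTES(4096) = 4096 << 12 = 16 MB; the << 8 in
 * DRM_MB2PAGES encodes 1 MB / 4 KB = 256 pages per megabyte.
 */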
-
-#define DRM_MAX_INSTANCES 8
-#define DRM_DEVNODE "drm"
-#define DRM_UNOPENED 0
-#define DRM_OPENED 1
-
-#define DRM_HASH_SIZE 16 /* Size of key hash table */
-#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
-#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
-
-#define DRM_MEM_DMA 0
-#define DRM_MEM_SAREA 1
-#define DRM_MEM_DRIVER 2
-#define DRM_MEM_MAGIC 3
-#define DRM_MEM_IOCTLS 4
-#define DRM_MEM_MAPS 5
-#define DRM_MEM_BUFS 6
-#define DRM_MEM_SEGS 7
-#define DRM_MEM_PAGES 8
-#define DRM_MEM_FILES 9
-#define DRM_MEM_QUEUES 10
-#define DRM_MEM_CMDS 11
-#define DRM_MEM_MAPPINGS 12
-#define DRM_MEM_BUFLISTS 13
-#define DRM_MEM_DRMLISTS 14
-#define DRM_MEM_TOTALDRM 15
-#define DRM_MEM_BOUNDDRM 16
-#define DRM_MEM_CTXBITMAP 17
-#define DRM_MEM_STUB 18
-#define DRM_MEM_SGLISTS 19
-#define DRM_MEM_AGPLISTS 20
-#define DRM_MEM_CTXLIST 21
-#define DRM_MEM_MM 22
-#define DRM_MEM_HASHTAB 23
-#define DRM_MEM_OBJECTS 24
-
-#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
-#define DRM_MAP_HASH_OFFSET 0x10000000
-#define DRM_MAP_HASH_ORDER 12
-#define DRM_OBJECT_HASH_ORDER 12
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#define DRM_MM_INIT_MAX_PAGES 256
-
-
-/* Internal types and structures */
-#define DRM_ARRAY_SIZE(x) (sizeof (x) / sizeof (x[0]))
-#define DRM_MIN(a, b) ((a) < (b) ? (a) : (b))
-#define DRM_MAX(a, b) ((a) > (b) ? (a) : (b))
-
-#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-
-#define __OS_HAS_AGP 1
-
-#define DRM_DEV_MOD (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-#define DRM_DEV_UID 0
-#define DRM_DEV_GID 0
-
-#define DRM_CURRENTPID ddi_get_pid()
-#define DRM_SPINLOCK(l) mutex_enter(l)
-#define DRM_SPINUNLOCK(u) mutex_exit(u)
-#define DRM_SPINLOCK_ASSERT(l)
-#define DRM_LOCK() mutex_enter(&dev->dev_lock)
-#define DRM_UNLOCK() mutex_exit(&dev->dev_lock)
-#define DRM_LOCK_OWNED() ASSERT(mutex_owned(&dev->dev_lock))
-#define spin_lock_irqsave(l, flag) mutex_enter(l)
-#define spin_unlock_irqrestore(u, flag) mutex_exit(u)
-#define spin_lock(l) mutex_enter(l)
-#define spin_unlock(u) mutex_exit(u)
-
-
-#define DRM_UDELAY(sec) delay(drv_usectohz(sec *1000))
-#define DRM_MEMORYBARRIER()
-
-typedef struct drm_file drm_file_t;
-typedef struct drm_device drm_device_t;
-typedef struct drm_driver_info drm_driver_t;
-
-#define DRM_DEVICE drm_device_t *dev = dev1
-#define DRM_IOCTL_ARGS \
- drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
-
-#define DRM_COPYFROM_WITH_RETURN(dest, src, size) \
- if (ddi_copyin((src), (dest), (size), 0)) { \
- DRM_ERROR("%s: copy from user failed", __func__); \
- return (EFAULT); \
- }
-
-#define DRM_COPYTO_WITH_RETURN(dest, src, size) \
- if (ddi_copyout((src), (dest), (size), 0)) { \
- DRM_ERROR("%s: copy to user failed", __func__); \
- return (EFAULT); \
- }
-
-#define DRM_COPY_FROM_USER(dest, src, size) \
- ddi_copyin((src), (dest), (size), 0) /* flag for src */
-
-#define DRM_COPY_TO_USER(dest, src, size) \
- ddi_copyout((src), (dest), (size), 0) /* flags for dest */
-
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
- ddi_copyin((arg2), (arg1), (arg3), 0)
-
-#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
- ddi_copyout((arg2), arg1, arg3, 0)
-
-#define DRM_READ8(map, offset) \
- *(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset))
-#define DRM_READ16(map, offset) \
- *(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset))
-#define DRM_READ32(map, offset) \
- *(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset))
-#define DRM_WRITE8(map, offset, val) \
- *(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
-#define DRM_WRITE16(map, offset, val) \
- *(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
-#define DRM_WRITE32(map, offset, val) \
- *(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
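
/*
 * Illustrative sketch, not from the original header: how a driver
 * consumes the register accessors above. "mmio_map" and the 0x2030
 * ring-tail offset are hypothetical; any drm_local_map_t with a valid
 * dev_addr is used the same way.
 *
 *	drm_local_map_t *map = dev_priv->mmio_map;
 *	uint32_t tail;
 *
 *	tail = DRM_READ32(map, 0x2030);
 *	DRM_WRITE32(map, 0x2030, tail + 8);
 */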
-
-typedef struct drm_wait_queue {
- kcondvar_t cv;
- kmutex_t lock;
-} wait_queue_head_t;
-
-#define DRM_INIT_WAITQUEUE(q, pri) \
-{ \
- mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri); \
- cv_init(&(q)->cv, NULL, CV_DRIVER, NULL); \
-}
-
-#define DRM_FINI_WAITQUEUE(q) \
-{ \
- mutex_destroy(&(q)->lock); \
- cv_destroy(&(q)->cv); \
-}
-
-#define DRM_WAKEUP(q) \
-{ \
- mutex_enter(&(q)->lock); \
- cv_broadcast(&(q)->cv); \
- mutex_exit(&(q)->lock); \
-}
-
-#define jiffies ddi_get_lbolt()
-
-#define DRM_WAIT_ON(ret, q, timeout, condition) \
- mutex_enter(&(q)->lock); \
- while (!(condition)) { \
- ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
- TR_CLOCK_TICK); \
- if (ret == -1) { \
- ret = EBUSY; \
- break; \
- } else if (ret == 0) { \
- ret = EINTR; \
- break; \
- } else { \
- ret = 0; \
- } \
- } \
- mutex_exit(&(q)->lock);
-
-#define DRM_WAIT(ret, q, condition) \
-mutex_enter(&(q)->lock); \
-if (!(condition)) { \
- ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 30 * DRM_HZ); \
- if (ret == -1) { \
- /* gfx maybe hang */ \
- if (!(condition)) \
- ret = -2; \
- } else { \
- ret = 0; \
- } \
-} \
-mutex_exit(&(q)->lock);
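
/*
 * Illustrative sketch, not from the original header: a typical
 * IRQ-driven wait built on DRM_WAIT_ON. "vbl_queue", "vbl_received"
 * and "target" are assumed driver fields; 3 * DRM_HZ is an arbitrary
 * timeout. Afterwards ret is 0 (condition met), EINTR (signal taken)
 * or EBUSY (timed out).
 *
 *	int ret = 0;
 *	DRM_WAIT_ON(ret, &dev->vbl_queue, 3 * DRM_HZ,
 *	    (dev->vbl_received >= target));
 *	return (ret);
 */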
-
-
-#define DRM_GETSAREA() \
-{ \
- drm_local_map_t *map; \
- DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
- TAILQ_FOREACH(map, &dev->maplist, link) { \
- if (map->type == _DRM_SHM && \
- map->flags & _DRM_CONTAINS_LOCK) { \
- dev_priv->sarea = map; \
- break; \
- } \
- } \
-}
-
-#define LOCK_TEST_WITH_RETURN(dev, fpriv) \
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
- dev->lock.filp != fpriv) { \
- DRM_DEBUG("%s called without lock held", __func__); \
- return (EINVAL); \
- }
-
-#define DRM_IRQ_ARGS caddr_t arg
-#define IRQ_HANDLED DDI_INTR_CLAIMED
-#define IRQ_NONE DDI_INTR_UNCLAIMED
-
-enum {
- DRM_IS_NOT_AGP,
- DRM_IS_AGP,
- DRM_MIGHT_BE_AGP
-};
-
-/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
-#ifndef PCIY_AGP
-#define PCIY_AGP 0x02
-#endif
-
-#ifndef PCIY_EXPRESS
-#define PCIY_EXPRESS 0x10
-#endif
-
-#define PAGE_ALIGN(addr) (((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
-#define DRM_SUSER(p) (crgetsgid(p) == 0 || crgetsuid(p) == 0)
-
-#define DRM_GEM_OBJIDR_HASHNODE 1024
-#define idr_list_for_each(entry, head) \
- for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
- list_for_each(entry, &(head)->next[key])
-
-/*
- * wait for 400 milliseconds
- */
-#define DRM_HZ drv_usectohz(400000)
-
-typedef unsigned long dma_addr_t;
-typedef uint64_t u64;
-typedef uint32_t u32;
-typedef uint16_t u16;
-typedef uint8_t u8;
-typedef uint_t irqreturn_t;
-
-#define DRM_SUPPORT 1
-#define DRM_UNSUPPORT 0
-
-#define __OS_HAS_AGP 1
-
-typedef struct drm_pci_id_list
-{
- int vendor;
- int device;
- long driver_private;
- char *name;
-} drm_pci_id_list_t;
-
-#define DRM_AUTH 0x1
-#define DRM_MASTER 0x2
-#define DRM_ROOT_ONLY 0x4
-typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
-typedef struct drm_ioctl_desc {
- int (*func)(DRM_IOCTL_ARGS);
- int flags;
-} drm_ioctl_desc_t;
-
-typedef struct drm_magic_entry {
- drm_magic_t magic;
- struct drm_file *priv;
- struct drm_magic_entry *next;
-} drm_magic_entry_t;
-
-typedef struct drm_magic_head {
- struct drm_magic_entry *head;
- struct drm_magic_entry *tail;
-} drm_magic_head_t;
-
-typedef struct drm_buf {
- int idx; /* Index into master buflist */
- int total; /* Buffer size */
- int order; /* log-base-2(total) */
- int used; /* Amount of buffer in use (for DMA) */
- unsigned long offset; /* Byte offset (used internally) */
- void *address; /* Address of buffer */
- unsigned long bus_address; /* Bus address of buffer */
- struct drm_buf *next; /* Kernel-only: used for free list */
- volatile int pending; /* On hardware DMA queue */
- drm_file_t *filp;
- /* Uniq. identifier of holding process */
- int context; /* Kernel queue for this buffer */
- enum {
- DRM_LIST_NONE = 0,
- DRM_LIST_FREE = 1,
- DRM_LIST_WAIT = 2,
- DRM_LIST_PEND = 3,
- DRM_LIST_PRIO = 4,
- DRM_LIST_RECLAIM = 5
- } list; /* Which list we're on */
-
- int dev_priv_size; /* Size of buffer private storage */
- void *dev_private; /* Per-buffer private storage */
-} drm_buf_t;
-
-typedef struct drm_freelist {
- int initialized; /* Freelist in use */
- uint32_t count; /* Number of free buffers */
- drm_buf_t *next; /* End pointer */
-
- int low_mark; /* Low water mark */
- int high_mark; /* High water mark */
-} drm_freelist_t;
-
-typedef struct drm_buf_entry {
- int buf_size;
- int buf_count;
- drm_buf_t *buflist;
- int seg_count;
- int page_order;
-
- uint32_t *seglist;
- unsigned long *seglist_bus;
-
- drm_freelist_t freelist;
-} drm_buf_entry_t;
-
-typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
-
-/* BEGIN CSTYLED */
-typedef struct drm_local_map {
- unsigned long offset; /* Physical address (0 for SAREA) */
- unsigned long size; /* Physical size (bytes) */
- drm_map_type_t type; /* Type of memory mapped */
- drm_map_flags_t flags; /* Flags */
- void *handle; /* User-space: "Handle" to pass to mmap */
- /* Kernel-space: kernel-virtual address */
- int mtrr; /* Boolean: MTRR used */
- /* Private data */
- int rid; /* PCI resource ID for bus_space */
- int kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
- caddr_t dev_addr; /* base device address */
- ddi_acc_handle_t dev_handle; /* The data access handle */
- ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free */
- TAILQ_ENTRY(drm_local_map) link;
-} drm_local_map_t;
-/* END CSTYLED */
-
-/*
- * This structure defines the GEM memory object, which is used by the
- * DRM for its buffer objects.
- */
-struct drm_gem_object {
- /* Reference count of this object */
- atomic_t refcount;
-
- /* Handle count of this object. Each handle also holds a reference */
- atomic_t handlecount;
-
- /* Related drm device */
- struct drm_device *dev;
-
- int flink;
- /*
- * Size of the object, in bytes. Immutable over the object's
- * lifetime.
- */
- size_t size;
-
- /*
- * Global name for this object, starts at 1. 0 means unnamed.
- * Access is covered by the object_name_lock in the related drm_device
- */
- int name;
-
- /*
- * Memory domains. These monitor which caches contain read/write data
- * related to the object. When transitioning from one set of domains
- * to another, the driver is called to ensure that caches are suitably
- * flushed and invalidated
- */
- uint32_t read_domains;
- uint32_t write_domain;
-
- /*
- * While validating an exec operation, the
- * new read/write domain values are computed here.
- * They will be transferred to the above values
- * at the point that any cache flushing occurs
- */
- uint32_t pending_read_domains;
- uint32_t pending_write_domain;
-
- void *driver_private;
-
- drm_local_map_t *map;
- ddi_dma_handle_t dma_hdl;
- ddi_acc_handle_t acc_hdl;
- caddr_t kaddr;
- size_t real_size; /* real size of memory */
- pfn_t *pfnarray;
-};
-
-struct idr_list {
- struct idr_list *next, *prev;
- struct drm_gem_object *obj;
- uint32_t handle;
- caddr_t contain_ptr;
-};
-
-struct drm_file {
- TAILQ_ENTRY(drm_file) link;
- int authenticated;
- int master;
- int minor;
- pid_t pid;
- uid_t uid;
- int refs;
- drm_magic_t magic;
- unsigned long ioctl_count;
- void *driver_priv;
- /* Mapping of mm object handles to object pointers. */
- struct idr_list object_idr;
- /* Lock for synchronization of access to object_idr. */
- kmutex_t table_lock;
-
- dev_t dev;
- cred_t *credp;
-};
-
-typedef struct drm_lock_data {
- drm_hw_lock_t *hw_lock; /* Hardware lock */
- drm_file_t *filp;
- /* Uniq. identifier of holding process */
- kcondvar_t lock_cv; /* lock queue - SOLARIS Specific */
- kmutex_t lock_mutex; /* lock - SOLARIS Specific */
- unsigned long lock_time; /* Time of last lock in clock ticks */
-} drm_lock_data_t;
-
-/*
- * This structure, in drm_device_t, is always initialized while the device
- * is open. dev->dma_lock protects the incrementing of dev->buf_use, which
- * when set marks that no further bufs may be allocated until device teardown
- * occurs (when the last open of the device has closed). The high/low
- * watermarks of bufs are only touched by the X Server, and thus not
- * concurrently accessed, so no locking is needed.
- */
-typedef struct drm_device_dma {
- drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
- int buf_count;
-	drm_buf_t	**buflist;	/* Vector of pointers into bufs */
- int seg_count;
- int page_count;
- unsigned long *pagelist;
- unsigned long byte_count;
- enum {
- _DRM_DMA_USE_AGP = 0x01,
- _DRM_DMA_USE_SG = 0x02
- } flags;
-} drm_device_dma_t;
-
-typedef struct drm_agp_mem {
- void *handle;
- unsigned long bound; /* address */
- int pages;
- caddr_t phys_addr;
- struct drm_agp_mem *prev;
- struct drm_agp_mem *next;
-} drm_agp_mem_t;
-
-typedef struct drm_agp_head {
- agp_info_t agp_info;
- const char *chipset;
- drm_agp_mem_t *memory;
- unsigned long mode;
- int enabled;
- int acquired;
- unsigned long base;
- int mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
- ldi_ident_t agpgart_li;
- ldi_handle_t agpgart_lh;
-} drm_agp_head_t;
-
-
-typedef struct drm_dma_handle {
- ddi_dma_handle_t dma_hdl;
- ddi_acc_handle_t acc_hdl;
- ddi_dma_cookie_t cookie;
- uint_t cookie_num;
- uintptr_t vaddr; /* virtual addr */
- uintptr_t paddr; /* physical addr */
- size_t real_sz; /* real size of memory */
-} drm_dma_handle_t;
-
-typedef struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- dma_addr_t *busaddr;
- ddi_umem_cookie_t *umem_cookie;
- drm_dma_handle_t *dmah_sg;
- drm_dma_handle_t *dmah_gart; /* Handle to PCI memory */
-} drm_sg_mem_t;
-
-/*
- * Generic memory manager structs
- */
-
-struct drm_mm_node {
- struct list_head fl_entry;
- struct list_head ml_entry;
- int free;
- unsigned long start;
- unsigned long size;
- struct drm_mm *mm;
- void *private;
-};
-
-struct drm_mm {
- struct list_head fl_entry;
- struct list_head ml_entry;
-};
-
-typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
-
-typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
-typedef struct drm_vbl_sig {
- TAILQ_ENTRY(drm_vbl_sig) link;
- unsigned int sequence;
- int signo;
- int pid;
-} drm_vbl_sig_t;
-
-
-/* used for clone device */
-typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
-typedef struct drm_cminor {
- TAILQ_ENTRY(drm_cminor) link;
- drm_file_t *fpriv;
- int minor;
-} drm_cminor_t;
-
-/* location of GART table */
-#define DRM_ATI_GART_MAIN 1
-#define DRM_ATI_GART_FB 2
-
-typedef struct ati_pcigart_info {
- int gart_table_location;
- int is_pcie;
- void *addr;
- dma_addr_t bus_addr;
- drm_local_map_t mapping;
-} drm_ati_pcigart_info;
-
-/* DRM device structure */
-struct drm_device;
-struct drm_driver_info {
- int (*load)(struct drm_device *, unsigned long);
- int (*firstopen)(struct drm_device *);
- int (*open)(struct drm_device *, drm_file_t *);
- void (*preclose)(struct drm_device *, drm_file_t *);
- void (*postclose)(struct drm_device *, drm_file_t *);
- void (*lastclose)(struct drm_device *);
- int (*unload)(struct drm_device *);
- void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
- int (*presetup)(struct drm_device *);
- int (*postsetup)(struct drm_device *);
- int (*open_helper)(struct drm_device *, drm_file_t *);
- void (*free_filp_priv)(struct drm_device *, drm_file_t *);
- void (*release)(struct drm_device *, void *);
- int (*dma_ioctl)(DRM_IOCTL_ARGS);
- void (*dma_ready)(struct drm_device *);
- int (*dma_quiescent)(struct drm_device *);
- int (*dma_flush_block_and_flush)(struct drm_device *,
- int, drm_lock_flags_t);
- int (*dma_flush_unblock)(struct drm_device *, int,
- drm_lock_flags_t);
- int (*context_ctor)(struct drm_device *, int);
- int (*context_dtor)(struct drm_device *, int);
- int (*kernel_context_switch)(struct drm_device *, int, int);
- int (*kernel_context_switch_unlock)(struct drm_device *);
- int (*device_is_agp) (struct drm_device *);
- int (*irq_preinstall)(struct drm_device *);
- void (*irq_postinstall)(struct drm_device *);
- void (*irq_uninstall)(struct drm_device *dev);
- uint_t (*irq_handler)(DRM_IRQ_ARGS);
- int (*vblank_wait)(struct drm_device *, unsigned int *);
- int (*vblank_wait2)(struct drm_device *, unsigned int *);
- /* added for intel minimized vblank */
- u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
- int (*enable_vblank)(struct drm_device *dev, int crtc);
- void (*disable_vblank)(struct drm_device *dev, int crtc);
-
- /*
- * Driver-specific constructor for drm_gem_objects, to set up
- * obj->driver_private.
- *
- * Returns 0 on success.
- */
- int (*gem_init_object) (struct drm_gem_object *obj);
- void (*gem_free_object) (struct drm_gem_object *obj);
-
-
- drm_ioctl_desc_t *driver_ioctls;
- int max_driver_ioctl;
-
- int buf_priv_size;
- int driver_major;
- int driver_minor;
- int driver_patchlevel;
- const char *driver_name; /* Simple driver name */
- const char *driver_desc; /* Longer driver name */
- const char *driver_date; /* Date of last major changes. */
-
- unsigned use_agp :1;
- unsigned require_agp :1;
- unsigned use_sg :1;
- unsigned use_dma :1;
- unsigned use_pci_dma :1;
- unsigned use_dma_queue :1;
- unsigned use_irq :1;
- unsigned use_vbl_irq :1;
- unsigned use_vbl_irq2 :1;
- unsigned use_mtrr :1;
- unsigned use_gem;
-};
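/*
 * A hypothetical GEM-capable driver fills in only the hooks it
 * implements at load time (names illustrative; drm_driver_t is assumed
 * to be the typedef for this struct used by struct drm_device below):
 *
 *	static drm_driver_t mydrv_driver;
 *
 *	mydrv_driver.load = mydrv_load;
 *	mydrv_driver.unload = mydrv_unload;
 *	mydrv_driver.gem_init_object = mydrv_gem_init_object;
 *	mydrv_driver.gem_free_object = mydrv_gem_free_object;
 *	mydrv_driver.use_gem = 1;
 *	mydrv_driver.driver_name = "mydrv";
 */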
-
-/*
- * Hardware-specific code needs to initialize mutexes that
- * can be used in interrupt context, so it needs to know
- * the interrupt priority. The interrupt cookie in the drm_device
- * structure is the intr_block field.
- */
-#define DRM_INTR_PRI(dev) \
- DDI_INTR_PRI((dev)->intr_block)
-
-struct drm_device {
- drm_driver_t *driver;
- drm_cminor_list_t minordevs;
- dev_info_t *dip;
- void *drm_handle;
- int drm_supported;
- const char *desc; /* current driver description */
- kmutex_t *irq_mutex;
- kcondvar_t *irq_cv;
-
- ddi_iblock_cookie_t intr_block;
- uint32_t pci_device; /* PCI device id */
- uint32_t pci_vendor;
- char *unique; /* Unique identifier: e.g., busid */
- int unique_len; /* Length of unique field */
- int if_version; /* Highest interface version set */
- int flags; /* Flags to open(2) */
-
- /* Locks */
- kmutex_t vbl_lock; /* protects vblank operations */
- kmutex_t dma_lock; /* protects dev->dma */
- kmutex_t irq_lock; /* protects irq condition checks */
- kmutex_t dev_lock; /* protects everything else */
- drm_lock_data_t lock; /* Information on hardware lock */
-	kmutex_t	struct_mutex;	/**< For others */
-
- /* Usage Counters */
- int open_count; /* Outstanding files open */
- int buf_use; /* Buffers in use -- cannot alloc */
-
- /* Performance counters */
- unsigned long counters;
- drm_stat_type_t types[15];
- uint32_t counts[15];
-
- /* Authentication */
- drm_file_list_t files;
- drm_magic_head_t magiclist[DRM_HASH_SIZE];
-
- /* Linked list of mappable regions. Protected by dev_lock */
- drm_map_list_t maplist;
-
- drm_local_map_t **context_sareas;
- int max_context;
-
- /* DMA queues (contexts) */
- drm_device_dma_t *dma; /* Optional pointer for DMA support */
-
- /* Context support */
- int irq; /* Interrupt used by board */
- int irq_enabled; /* True if the irq handler is enabled */
- int pci_domain;
- int pci_bus;
- int pci_slot;
- int pci_func;
- atomic_t context_flag; /* Context swapping flag */
- int last_context; /* Last current context */
-
- /* Only used for Radeon */
- atomic_t vbl_received;
- atomic_t vbl_received2;
-
- drm_vbl_sig_list_t vbl_sig_list;
- drm_vbl_sig_list_t vbl_sig_list2;
- /*
- * At load time, disabling the vblank interrupt won't be allowed since
- * old clients may not call the modeset ioctl and therefore misbehave.
- * Once the modeset ioctl *has* been called though, we can safely
- * disable them when unused.
- */
- int vblank_disable_allowed;
-
- wait_queue_head_t vbl_queue; /* vbl wait channel */
- /* vbl wait channel array */
- wait_queue_head_t *vbl_queues;
-
- /* number of VBLANK interrupts */
- /* (driver must alloc the right number of counters) */
- atomic_t *_vblank_count;
- /* signal list to send on VBLANK */
- struct drm_vbl_sig_list *vbl_sigs;
-
- /* number of signals pending on all crtcs */
- atomic_t vbl_signal_pending;
- /* number of users of vblank interrupts per crtc */
- atomic_t *vblank_refcount;
- /* protected by dev->vbl_lock, used for wraparound handling */
- u32 *last_vblank;
-	/* so we don't call enable more than once per disable */
- atomic_t *vblank_enabled;
- /* Display driver is setting mode */
- int *vblank_inmodeset;
- /* Don't wait while crtc is likely disabled */
- int *vblank_suspend;
- /* size of vblank counter register */
- u32 max_vblank_count;
- int num_crtcs;
- kmutex_t tasklet_lock;
- void (*locked_tasklet_func)(struct drm_device *dev);
-
- pid_t buf_pgid;
- drm_agp_head_t *agp;
- drm_sg_mem_t *sg; /* Scatter gather memory */
- uint32_t *ctx_bitmap;
- void *dev_private;
- unsigned int agp_buffer_token;
- drm_local_map_t *agp_buffer_map;
-
- kstat_t *asoft_ksp; /* kstat support */
-
-	/* \name Drawable information */
- kmutex_t drw_lock;
- unsigned int drw_bitfield_length;
- u32 *drw_bitfield;
- unsigned int drw_info_length;
- drm_drawable_info_t **drw_info;
-
- /* \name GEM information */
- /* @{ */
- kmutex_t object_name_lock;
- struct idr_list object_name_idr;
- atomic_t object_count;
- atomic_t object_memory;
- atomic_t pin_count;
- atomic_t pin_memory;
- atomic_t gtt_count;
- atomic_t gtt_memory;
- uint32_t gtt_total;
- uint32_t invalidate_domains; /* domains pending invalidation */
- uint32_t flush_domains; /* domains pending flush */
- /* @} */
-
- /*
- * Saving S3 context
- */
- void *s3_private;
-};
-
-/* Memory management support (drm_memory.c) */
-void drm_mem_init(void);
-void drm_mem_uninit(void);
-void *drm_alloc(size_t, int);
-void *drm_calloc(size_t, size_t, int);
-void *drm_realloc(void *, size_t, size_t, int);
-void drm_free(void *, size_t, int);
-int drm_ioremap(drm_device_t *, drm_local_map_t *);
-void drm_ioremapfree(drm_local_map_t *);
-
-void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
-void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);
-
-void drm_pci_free(drm_device_t *, drm_dma_handle_t *);
-void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);
-
-struct drm_local_map *drm_core_findmap(struct drm_device *, unsigned long);
-
-int drm_context_switch(drm_device_t *, int, int);
-int drm_context_switch_complete(drm_device_t *, int);
-int drm_ctxbitmap_init(drm_device_t *);
-void drm_ctxbitmap_cleanup(drm_device_t *);
-void drm_ctxbitmap_free(drm_device_t *, int);
-int drm_ctxbitmap_next(drm_device_t *);
-
-/* Locking IOCTL support (drm_lock.c) */
-int drm_lock_take(drm_lock_data_t *, unsigned int);
-int drm_lock_transfer(drm_device_t *,
- drm_lock_data_t *, unsigned int);
-int drm_lock_free(drm_device_t *,
- volatile unsigned int *, unsigned int);
-
-/* Buffer management support (drm_bufs.c) */
-unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
-unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
-int drm_initmap(drm_device_t *, unsigned long, unsigned long,
- unsigned int, int, int);
-void drm_rmmap(drm_device_t *, drm_local_map_t *);
-int drm_addmap(drm_device_t *, unsigned long, unsigned long,
- drm_map_type_t, drm_map_flags_t, drm_local_map_t **);
-int drm_order(unsigned long);
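/*
 * drm_order() computes ceil(log2(size)), the buffer "order" used to
 * index drm_device_dma_t::bufs above; e.g. drm_order(4096) == 12 and
 * drm_order(4097) == 13.
 */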
-
-/* DMA support (drm_dma.c) */
-int drm_dma_setup(drm_device_t *);
-void drm_dma_takedown(drm_device_t *);
-void drm_free_buffer(drm_device_t *, drm_buf_t *);
-void drm_reclaim_buffers(drm_device_t *, drm_file_t *);
-#define drm_core_reclaim_buffers drm_reclaim_buffers
-
-/* IRQ support (drm_irq.c) */
-int drm_irq_install(drm_device_t *);
-int drm_irq_uninstall(drm_device_t *);
-uint_t drm_irq_handler(DRM_IRQ_ARGS);
-void drm_driver_irq_preinstall(drm_device_t *);
-void drm_driver_irq_postinstall(drm_device_t *);
-void drm_driver_irq_uninstall(drm_device_t *);
-int drm_vblank_wait(drm_device_t *, unsigned int *);
-void drm_vbl_send_signals(drm_device_t *);
-void drm_handle_vblank(struct drm_device *dev, int crtc);
-u32 drm_vblank_count(struct drm_device *dev, int crtc);
-int drm_vblank_get(struct drm_device *dev, int crtc);
-void drm_vblank_put(struct drm_device *dev, int crtc);
-int drm_vblank_init(struct drm_device *dev, int num_crtcs);
-void drm_vblank_cleanup(struct drm_device *dev);
-int drm_modeset_ctl(DRM_IOCTL_ARGS);
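/*
 * The vblank calls above follow a refcount discipline: a hypothetical
 * wait path takes a reference before sleeping on the counter and
 * always drops it afterwards, so the core can disable the interrupt
 * once the refcount reaches zero:
 *
 *	if (drm_vblank_get(dev, crtc) == 0) {
 *		-- wait until drm_vblank_count(dev, crtc) advances --
 *		drm_vblank_put(dev, crtc);
 *	}
 */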
-
-/* AGP/GART support (drm_agpsupport.c) */
-int drm_device_is_agp(drm_device_t *);
-int drm_device_is_pcie(drm_device_t *);
-drm_agp_head_t *drm_agp_init(drm_device_t *);
-void drm_agp_fini(drm_device_t *);
-int drm_agp_do_release(drm_device_t *);
-void *drm_agp_allocate_memory(size_t pages,
- uint32_t type, drm_device_t *dev);
-int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
-int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
-int drm_agp_unbind_memory(unsigned long, drm_device_t *);
-int drm_agp_bind_pages(drm_device_t *dev,
- pfn_t *pages,
- unsigned long num_pages,
- uint32_t gtt_offset);
-int drm_agp_unbind_pages(drm_device_t *dev,
- unsigned long num_pages,
- uint32_t gtt_offset,
- uint32_t type);
-void drm_agp_chipset_flush(struct drm_device *dev);
-void drm_agp_rebind(struct drm_device *dev);
-
-/* kstat support (drm_kstats.c) */
-int drm_init_kstats(drm_device_t *);
-void drm_fini_kstats(drm_device_t *);
-
-/* Scatter Gather Support (drm_scatter.c) */
-void drm_sg_cleanup(drm_device_t *, drm_sg_mem_t *);
-
-/* ATI PCIGART support (ati_pcigart.c) */
-int drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
-int drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);
-
-/* Locking IOCTL support (drm_drv.c) */
-int drm_lock(DRM_IOCTL_ARGS);
-int drm_unlock(DRM_IOCTL_ARGS);
-int drm_version(DRM_IOCTL_ARGS);
-int drm_setversion(DRM_IOCTL_ARGS);
-/* Cache management (drm_cache.c) */
-void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
-
-/* Misc. IOCTL support (drm_ioctl.c) */
-int drm_irq_by_busid(DRM_IOCTL_ARGS);
-int drm_getunique(DRM_IOCTL_ARGS);
-int drm_setunique(DRM_IOCTL_ARGS);
-int drm_getmap(DRM_IOCTL_ARGS);
-int drm_getclient(DRM_IOCTL_ARGS);
-int drm_getstats(DRM_IOCTL_ARGS);
-int drm_noop(DRM_IOCTL_ARGS);
-
-/* Context IOCTL support (drm_context.c) */
-int drm_resctx(DRM_IOCTL_ARGS);
-int drm_addctx(DRM_IOCTL_ARGS);
-int drm_modctx(DRM_IOCTL_ARGS);
-int drm_getctx(DRM_IOCTL_ARGS);
-int drm_switchctx(DRM_IOCTL_ARGS);
-int drm_newctx(DRM_IOCTL_ARGS);
-int drm_rmctx(DRM_IOCTL_ARGS);
-int drm_setsareactx(DRM_IOCTL_ARGS);
-int drm_getsareactx(DRM_IOCTL_ARGS);
-
-/* Drawable IOCTL support (drm_drawable.c) */
-int drm_adddraw(DRM_IOCTL_ARGS);
-int drm_rmdraw(DRM_IOCTL_ARGS);
-int drm_update_draw(DRM_IOCTL_ARGS);
-
-/* Authentication IOCTL support (drm_auth.c) */
-int drm_getmagic(DRM_IOCTL_ARGS);
-int drm_authmagic(DRM_IOCTL_ARGS);
-int drm_remove_magic(drm_device_t *, drm_magic_t);
-drm_file_t *drm_find_file(drm_device_t *, drm_magic_t);
-/* Buffer management support (drm_bufs.c) */
-int drm_addmap_ioctl(DRM_IOCTL_ARGS);
-int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
-int drm_addbufs_ioctl(DRM_IOCTL_ARGS);
-int drm_infobufs(DRM_IOCTL_ARGS);
-int drm_markbufs(DRM_IOCTL_ARGS);
-int drm_freebufs(DRM_IOCTL_ARGS);
-int drm_mapbufs(DRM_IOCTL_ARGS);
-
-/* DMA support (drm_dma.c) */
-int drm_dma(DRM_IOCTL_ARGS);
-
-/* IRQ support (drm_irq.c) */
-int drm_control(DRM_IOCTL_ARGS);
-int drm_wait_vblank(DRM_IOCTL_ARGS);
-
-/* AGP/GART support (drm_agpsupport.c) */
-int drm_agp_acquire(DRM_IOCTL_ARGS);
-int drm_agp_release(DRM_IOCTL_ARGS);
-int drm_agp_enable(DRM_IOCTL_ARGS);
-int drm_agp_info(DRM_IOCTL_ARGS);
-int drm_agp_alloc(DRM_IOCTL_ARGS);
-int drm_agp_free(DRM_IOCTL_ARGS);
-int drm_agp_unbind(DRM_IOCTL_ARGS);
-int drm_agp_bind(DRM_IOCTL_ARGS);
-
-/* Scatter Gather Support (drm_scatter.c) */
-int drm_sg_alloc(DRM_IOCTL_ARGS);
-int drm_sg_free(DRM_IOCTL_ARGS);
-
-/* drm_mm.c */
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
- unsigned long size, unsigned alignment);
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment, int best_match);
-
-extern void drm_mm_clean_ml(const struct drm_mm *mm);
-extern int drm_debug_flag;
-
-/* Functions supporting DRM_DEBUG, DRM_ERROR and DRM_INFO */
-extern void drm_debug(const char *fmt, ...);
-extern void drm_error(const char *fmt, ...);
-extern void drm_info(const char *fmt, ...);
-
-#ifdef DEBUG
-#define DRM_DEBUG if (drm_debug_flag >= 2) drm_debug
-#define DRM_INFO if (drm_debug_flag >= 1) drm_info
-#else
-#define DRM_DEBUG(...)
-#define DRM_INFO(...)
-#endif
-
-#define DRM_ERROR drm_error
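/*
 * Usage sketch: on DEBUG kernels DRM_DEBUG fires only when
 * drm_debug_flag >= 2 and DRM_INFO when drm_debug_flag >= 1;
 * DRM_ERROR always logs, e.g.
 *
 *	DRM_DEBUG("vblank enabled on crtc %d", crtc);
 *	DRM_ERROR("AGPIOC_BIND failed, ret=%d", ret);
 */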
-
-
-#define MAX_INSTNUMS 16
-
-extern int drm_dev_to_instance(dev_t);
-extern int drm_dev_to_minor(dev_t);
-extern void *drm_supp_register(dev_info_t *, drm_device_t *);
-extern int drm_supp_unregister(void *);
-
-extern int drm_open(drm_device_t *, drm_cminor_t *, int, int, cred_t *);
-extern int drm_close(drm_device_t *, int, int, int, cred_t *);
-extern int drm_attach(drm_device_t *);
-extern int drm_detach(drm_device_t *);
-extern int drm_probe(drm_device_t *, drm_pci_id_list_t *);
-
-extern int drm_pci_init(drm_device_t *);
-extern void drm_pci_end(drm_device_t *);
-extern int pci_get_info(drm_device_t *, int *, int *, int *);
-extern int pci_get_irq(drm_device_t *);
-extern int pci_get_vendor(drm_device_t *);
-extern int pci_get_device(drm_device_t *);
-
-extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *,
- drm_drawable_t);
-/* File Operations helpers (drm_fops.c) */
-extern drm_file_t *drm_find_file_by_proc(drm_device_t *, cred_t *);
-extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
-extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
- cred_t *);
-
-/* Graphics Execution Manager library functions (drm_gem.c) */
-int drm_gem_init(struct drm_device *dev);
-void drm_gem_object_free(struct drm_gem_object *obj);
-struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
- size_t size);
-void drm_gem_object_handle_free(struct drm_gem_object *obj);
-
-void drm_gem_object_reference(struct drm_gem_object *obj);
-void drm_gem_object_unreference(struct drm_gem_object *obj);
-
-int drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- int *handlep);
-void drm_gem_object_handle_reference(struct drm_gem_object *obj);
-
-void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
-
-struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
- int handle);
-int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
-int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
-int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
-void drm_gem_open(struct drm_file *file_private);
-void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-
-
-#endif /* _DRMP_H */
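The GEM prototypes above pair reference counting with two kinds of bookkeeping: handles from drm_gem_handle_create() are per-file (drm_file::object_idr), while flink names are device-global (drm_device::object_name_idr). A minimal, hypothetical lookup pattern for a driver ioctl, assuming drm_gem_object_lookup() takes a reference on success:

	static int
	mydrv_touch_object(struct drm_file *filp, int handle)
	{
		struct drm_gem_object *obj;

		obj = drm_gem_object_lookup(filp, handle);
		if (obj == NULL)
			return (EINVAL);
		/* ... operate on obj->size, obj->kaddr, ... */
		drm_gem_object_unreference(obj);
		return (0);
	}
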
diff --git a/usr/src/uts/common/io/drm/drm_agpsupport.c b/usr/src/uts/common/io/drm/drm_agpsupport.c
index ae695da..272ab43 100644
--- a/usr/src/uts/common/io/drm/drm_agpsupport.c
+++ b/usr/src/uts/common/io/drm/drm_agpsupport.c
@@ -1,16 +1,19 @@
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
-/*
- * drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*-
- * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
*/
+
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,11 +34,6 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Author:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
*/
#include "drm.h"
@@ -55,449 +53,495 @@
*/
#define DRM_AGP_KEY_OFFSET 8
-extern int drm_supp_device_capability(void *handle, int capid);
-
-/*ARGSUSED*/
-int
-drm_device_is_agp(drm_device_t *dev)
+void drm_agp_cleanup(struct drm_device *dev)
{
- int ret;
-
- if (dev->driver->device_is_agp != NULL) {
- /*
- * device_is_agp returns a tristate:
- * 0 = not AGP;
- * 1 = definitely AGP;
- * 2 = fall back to PCI capability
- */
- ret = (*dev->driver->device_is_agp)(dev);
- if (ret != DRM_MIGHT_BE_AGP)
- return (ret);
- }
-
- return (drm_supp_device_capability(dev->drm_handle, PCIY_AGP));
+ struct drm_agp_head *agp = dev->agp;
+ (void) ldi_close(agp->agpgart_lh, FEXCL, kcred);
+ ldi_ident_release(agp->agpgart_li);
}
-/*ARGSUSED*/
-int
-drm_device_is_pcie(drm_device_t *dev)
-{
- return (drm_supp_device_capability(dev->drm_handle, PCIY_EXPRESS));
-}
-
-
-/*ARGSUSED*/
-int
-drm_agp_info(DRM_IOCTL_ARGS)
+/**
+ * Get AGP information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a (output) drm_agp_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
- DRM_DEVICE;
- agp_info_t *agpinfo;
- drm_agp_info_t info;
+ agp_info_t *agpinfo;
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
+ return -EINVAL;
agpinfo = &dev->agp->agp_info;
- info.agp_version_major = agpinfo->agpi_version.agpv_major;
- info.agp_version_minor = agpinfo->agpi_version.agpv_minor;
- info.mode = agpinfo->agpi_mode;
- info.aperture_base = agpinfo->agpi_aperbase;
- info.aperture_size = agpinfo->agpi_apersize* 1024 * 1024;
-	info.aperture_size = agpinfo->agpi_apersize * 1024 * 1024;
- info.memory_used = agpinfo->agpi_pgused << PAGE_SHIFT;
- info.id_vendor = agpinfo->agpi_devid & 0xffff;
- info.id_device = agpinfo->agpi_devid >> 16;
-
- DRM_COPYTO_WITH_RETURN((void *)data, &info, sizeof (info));
- return (0);
+ info->agp_version_major = agpinfo->agpi_version.agpv_major;
+ info->agp_version_minor = agpinfo->agpi_version.agpv_minor;
+ info->mode = agpinfo->agpi_mode;
+ info->aperture_base = agpinfo->agpi_aperbase;
+ info->aperture_size = agpinfo->agpi_apersize * 1024 * 1024;
+ info->memory_allowed = agpinfo->agpi_pgtotal << PAGE_SHIFT;
+ info->memory_used = agpinfo->agpi_pgused << PAGE_SHIFT;
+ info->id_vendor = agpinfo->agpi_devid & 0xffff;
+ info->id_device = agpinfo->agpi_devid >> 16;
+
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_agp_acquire(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_agp_info_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- int ret, rval;
+ struct drm_agp_info *info = data;
+ int err;
- if (!dev->agp) {
- DRM_ERROR("drm_agp_acquire : agp isn't initialized yet");
- return (ENODEV);
- }
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_ACQUIRE,
- (uintptr_t)0, FKIOCTL, kcred, &rval);
- if (ret) {
-		DRM_ERROR("drm_agp_acquire: AGPIOC_ACQUIRE failed\n");
- return (EIO);
- }
- dev->agp->acquired = 1;
+ err = drm_agp_info(dev, info);
+ if (err)
+ return err;
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_agp_release(DRM_IOCTL_ARGS)
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
{
- DRM_DEVICE;
- int ret, rval;
-
if (!dev->agp)
- return (ENODEV);
- if (!dev->agp->acquired)
- return (EBUSY);
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RELEASE,
- (intptr_t)0, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_release: AGPIOC_RELEASE failed\n");
- return (ENXIO);
+ return -ENODEV;
+ if (dev->agp->acquired)
+ return -EBUSY;
+ {
+ int ret, rval;
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_ACQUIRE,
+ (uintptr_t)0, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIOC_ACQUIRE failed");
+ return -ret;
+ }
}
- dev->agp->acquired = 0;
-
- return (ret);
+ dev->agp->acquired = 1;
+ return 0;
}
-
-int
-drm_agp_do_release(drm_device_t *dev)
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+/* LINTED */
+int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS)
{
- int ret, rval;
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RELEASE,
- (intptr_t)0, FKIOCTL, kcred, &rval);
-
- if (ret == 0)
- dev->agp->acquired = 0;
-
- return (ret);
+ return drm_agp_acquire((struct drm_device *) file->minor->dev);
}
-/*ARGSUSED*/
-int
-drm_agp_enable(DRM_IOCTL_ARGS)
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(struct drm_device * dev)
{
- DRM_DEVICE;
- drm_agp_mode_t modes;
- agp_setup_t setup;
- int ret, rval;
-
- if (!dev->agp)
- return (ENODEV);
- if (!dev->agp->acquired)
- return (EBUSY);
-
- DRM_COPYFROM_WITH_RETURN(&modes, (void *)data, sizeof (modes));
-
- dev->agp->mode = modes.mode;
- setup.agps_mode = (uint32_t)modes.mode;
-
+ if (!dev->agp || !dev->agp->acquired)
+ return -EINVAL;
+ {
+ int ret, rval;
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RELEASE,
+ (intptr_t)0, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIOC_RELEASE failed");
+ return -ret;
+ }
+ }
+ dev->agp->acquired = 0;
+ return 0;
+}
- DRM_DEBUG("drm_agp_enable: dev->agp->mode=%lx", modes.mode);
+/* LINTED */
+int drm_agp_release_ioctl(DRM_IOCTL_ARGS)
+{
+ return drm_agp_release(dev);
+}
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_SETUP,
- (intptr_t)&setup, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_enable: failed");
- return (EIO);
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+{
+ if (!dev->agp || !dev->agp->acquired)
+ return -EINVAL;
+
+ dev->agp->mode = mode.mode;
+ {
+ agp_setup_t setup;
+ int ret, rval;
+ setup.agps_mode = (uint32_t)mode.mode;
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_SETUP,
+ (intptr_t)&setup, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIOC_SETUP failed");
+ return -ret;
+ }
}
-
- dev->agp->base = dev->agp->agp_info.agpi_aperbase;
dev->agp->enabled = 1;
+ return 0;
+}
- DRM_DEBUG("drm_agp_enable: dev->agp->base=0x%lx", dev->agp->base);
- return (0);
+/* LINTED */
+int drm_agp_enable_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_agp_mode *mode = data;
+
+ return drm_agp_enable(dev, *mode);
}
-/*ARGSUSED*/
-int
-drm_agp_alloc(DRM_IOCTL_ARGS)
+/**
+ * Allocate AGP memory.
+ *
+ * \param inode device inode.
+ * \param file_priv file private pointer.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ */
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
- DRM_DEVICE;
- drm_agp_mem_t *entry;
- agp_allocate_t alloc;
- drm_agp_buffer_t request;
- int pages;
+ struct drm_agp_mem *entry;
+ agp_allocate_t alloc;
+ unsigned long pages;
int ret, rval;
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
+ return -EINVAL;
+ if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+ return -ENOMEM;
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
+ (void) memset(entry, 0, sizeof(*entry));
- entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
+ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- pages = btopr(request.size);
- alloc.agpa_pgcount = pages;
+ alloc.agpa_pgcount = (uint32_t) pages;
alloc.agpa_type = AGP_NORMAL;
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_ALLOCATE,
(intptr_t)&alloc, FKIOCTL, kcred, &rval);
if (ret) {
- DRM_ERROR("drm_agp_alloc: AGPIOC_ALLOCATE failed, ret=%d", ret);
- kmem_free(entry, sizeof (*entry));
- return (ret);
+ DRM_ERROR("AGPIOC_ALLOCATE failed");
+ kfree(entry, sizeof (*entry));
+ return -ret;
}
+ entry->handle = alloc.agpa_key + DRM_AGP_KEY_OFFSET;
entry->bound = 0;
- entry->pages = pages;
- entry->handle = (void*)(uintptr_t)(alloc.agpa_key + DRM_AGP_KEY_OFFSET);
- entry->prev = NULL;
- entry->phys_addr = (void*)(uintptr_t)alloc.agpa_physical;
- entry->next = dev->agp->memory;
- if (dev->agp->memory)
- dev->agp->memory->prev = entry;
- dev->agp->memory = entry;
-
- DRM_DEBUG("entry->phys_addr %lx", entry->phys_addr);
-
- /* physical is used only by i810 driver */
- request.physical = alloc.agpa_physical;
- request.handle = (unsigned long)entry->handle;
-
- /*
-	 * If ddi_copyout() fails, the allocated AGP memory is freed
-	 * when the drm device is closed
- */
- DRM_COPYTO_WITH_RETURN((void *)data, &request, sizeof (request));
-
- return (0);
+ entry->pages = (int) pages;
+ list_add(&entry->head, &dev->agp->memory, (caddr_t)entry);
+
+ request->handle = entry->handle;
+ request->physical = alloc.agpa_physical;
+
+ return 0;
}
-/*ARGSUSED*/
-static drm_agp_mem_t *
-drm_agp_lookup_entry(drm_device_t *dev, void *handle)
+/* LINTED */
+int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_buffer *request = data;
- for (entry = dev->agp->memory; entry; entry = entry->next) {
+ return drm_agp_alloc(dev, request);
+}
+
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+ unsigned long handle)
+{
+ struct drm_agp_mem *entry;
+
+ list_for_each_entry(entry, struct drm_agp_mem, &dev->agp->memory, head) {
if (entry->handle == handle)
- return (entry);
+ return entry;
}
-
- return (NULL);
+ return NULL;
}
-/*ARGSUSED*/
-int
-drm_agp_unbind(DRM_IOCTL_ARGS)
+/**
+ * Unbind AGP memory from the GATT (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks up the AGP memory
+ * entry and passes it to the unbind_agp() function.
+ */
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
- DRM_DEVICE;
- agp_unbind_t unbind;
- drm_agp_binding_t request;
- drm_agp_mem_t *entry;
- int ret, rval;
+ struct drm_agp_mem *entry;
+ int ret;
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
-
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
-
- if (!(entry = drm_agp_lookup_entry(dev, (void *)request.handle)))
- return (EINVAL);
+ return -EINVAL;
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+ return -EINVAL;
if (!entry->bound)
- return (EINVAL);
-
- unbind.agpu_pri = 0;
- unbind.agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
- (intptr_t)&unbind, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_unbind: AGPIOC_UNBIND failed");
- return (EIO);
+ return -EINVAL;
+ {
+ agp_unbind_t unbind;
+ int rval;
+ unbind.agpu_pri = 0;
+ unbind.agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
+ (intptr_t)&unbind, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIOC_UNBIND failed");
+ return -ret;
+ }
}
- entry->bound = 0;
- return (0);
+ entry->bound = 0;
+ return ret;
}
-/*ARGSUSED*/
-int
-drm_agp_bind(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_agp_binding_t request;
- drm_agp_mem_t *entry;
- int start;
- uint_t key;
-
- if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
+ struct drm_agp_binding *request = data;
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
+ return drm_agp_unbind(dev, request);
+}
- entry = drm_agp_lookup_entry(dev, (void *)request.handle);
- if (!entry || entry->bound)
- return (EINVAL);
+/**
+ * Bind AGP memory into the GATT (ioctl)
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that no memory
+ * is currently bound into the GATT. Looks up the AGP memory entry and passes
+ * it to the bind_agp() function.
+ */
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+ struct drm_agp_mem *entry;
+ int retcode;
+ int page;
- key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
- start = btopr(request.offset);
- if (drm_agp_bind_memory(key, start, dev)) {
- DRM_ERROR("drm_agp_bind: failed key=%x, start=0x%x, "
- "agp_base=0x%lx", key, start, dev->agp->base);
- return (EIO);
+ if (!dev->agp || !dev->agp->acquired)
+ return -EINVAL;
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+ return -EINVAL;
+ if (entry->bound)
+ return -EINVAL;
+ page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ {
+ uint_t key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+ if (retcode = drm_agp_bind_memory(key, page, dev)) {
+ DRM_ERROR("failed key=0x%x, page=0x%x, "
+ "agp_base=0x%lx", key, page, dev->agp->base);
+ return retcode;
+ }
}
+ entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+ DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+ dev->agp->base, entry->bound);
+ return 0;
+}
- entry->bound = dev->agp->base + (start << AGP_PAGE_SHIFT);
+/* LINTED */
+int drm_agp_bind_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_agp_binding *request = data;
- return (0);
+ return drm_agp_bind(dev, request);
}
-/*ARGSUSED*/
-int
-drm_agp_free(DRM_IOCTL_ARGS)
+/**
+ * Free AGP memory (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory is currently bound, unbind it via
+ * unbind_agp(). Frees it via free_agp() as well as the entry itself,
+ * and unlinks it from the doubly linked list it is inserted in.
+ */
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
- DRM_DEVICE;
- drm_agp_buffer_t request;
- drm_agp_mem_t *entry;
- int ret, rval;
- int agpu_key;
+ struct drm_agp_mem *entry;
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
- if (!(entry = drm_agp_lookup_entry(dev, (void *)request.handle)))
- return (EINVAL);
+ return -EINVAL;
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+ return -EINVAL;
if (entry->bound)
- (void) drm_agp_unbind_memory(request.handle, dev);
-
- if (entry == dev->agp->memory)
- dev->agp->memory = entry->next;
- if (entry->prev)
- entry->prev->next = entry->next;
- if (entry->next)
- entry->next->prev = entry->prev;
-
- agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_DEALLOCATE,
- (intptr_t)agpu_key, FKIOCTL, kcred, &rval);
- if (ret) {
-		DRM_ERROR("drm_agp_free: AGPIOC_DEALLOCATE failed, "
- "akey=%d, ret=%d", agpu_key, ret);
- return (EIO);
+ (void) drm_agp_unbind_memory(request->handle, dev);
+
+ list_del(&entry->head);
+ {
+ int agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+ int ret, rval;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_DEALLOCATE,
+ (intptr_t)agpu_key, FKIOCTL, kcred, &rval);
+ if (ret) {
+			DRM_ERROR("AGPIOC_DEALLOCATE failed, "
+ "akey=%d, ret=%d", agpu_key, ret);
+ return -ret;
+ }
}
- drm_free(entry, sizeof (*entry), DRM_MEM_AGPLISTS);
- return (0);
+ kfree(entry, sizeof (*entry));
+ return 0;
}
-/*ARGSUSED*/
-drm_agp_head_t *
-drm_agp_init(drm_device_t *dev)
+/* LINTED */
+int drm_agp_free_ioctl(DRM_IOCTL_ARGS)
{
- drm_agp_head_t *agp = NULL;
- int retval, rval;
+ struct drm_agp_buffer *request = data;
+
+ return drm_agp_free(dev, request);
+}
- agp = kmem_zalloc(sizeof (drm_agp_head_t), KM_SLEEP);
+/**
+ * Initialize the AGP resources.
+ *
+ * \return pointer to a drm_agp_head structure.
+ *
+ * Opens the agpgart device through the LDI and creates and initializes a
+ * drm_agp_head structure with the information it reports.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+ struct drm_agp_head *head = NULL;
+ int ret, rval;
- retval = ldi_ident_from_dip(dev->dip, &agp->agpgart_li);
- if (retval != 0) {
-		DRM_ERROR("drm_agp_init: failed to get layered ident, retval=%d",
- retval);
+ if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+ return NULL;
+ (void) memset((void *)head, 0, sizeof(*head));
+ ret = ldi_ident_from_dip(dev->devinfo, &head->agpgart_li);
+ if (ret) {
+		DRM_ERROR("failed to get layered ident, ret=%d", ret);
goto err_1;
}
- retval = ldi_open_by_name(AGP_DEVICE, FEXCL, kcred,
- &agp->agpgart_lh, agp->agpgart_li);
- if (retval != 0) {
- DRM_ERROR("drm_agp_init: failed to open %s, retval=%d",
- AGP_DEVICE, retval);
+ ret = ldi_open_by_name(AGP_DEVICE, FEXCL, kcred,
+ &head->agpgart_lh, head->agpgart_li);
+ if (ret) {
+ DRM_ERROR("failed to open %s, ret=%d", AGP_DEVICE, ret);
goto err_2;
}
- retval = ldi_ioctl(agp->agpgart_lh, AGPIOC_INFO,
- (intptr_t)&agp->agp_info, FKIOCTL, kcred, &rval);
-
- if (retval != 0) {
- DRM_ERROR("drm_agp_init: failed to get agpinfo, retval=%d",
- retval);
+ ret = ldi_ioctl(head->agpgart_lh, AGPIOC_INFO,
+ (intptr_t)&head->agp_info, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("failed to get agpinfo, ret=%d", ret);
goto err_3;
}
-
- return (agp);
+ INIT_LIST_HEAD(&head->memory);
+ head->base = head->agp_info.agpi_aperbase;
+ return head;
err_3:
- (void) ldi_close(agp->agpgart_lh, FEXCL, kcred);
-
+ (void) ldi_close(head->agpgart_lh, FEXCL, kcred);
err_2:
- ldi_ident_release(agp->agpgart_li);
-
+ ldi_ident_release(head->agpgart_li);
err_1:
- kmem_free(agp, sizeof (drm_agp_head_t));
- return (NULL);
+ kfree(head, sizeof(*head));
+ return NULL;
}
-/*ARGSUSED*/
-void
-drm_agp_fini(drm_device_t *dev)
+/* LINTED */
+void *drm_agp_allocate_memory(size_t pages, uint32_t type, struct drm_device *dev)
{
- drm_agp_head_t *agp = dev->agp;
- (void) ldi_close(agp->agpgart_lh, FEXCL, kcred);
- ldi_ident_release(agp->agpgart_li);
- kmem_free(agp, sizeof (drm_agp_head_t));
- dev->agp = NULL;
+ return NULL;
}
-
-/*ARGSUSED*/
-void *
-drm_agp_allocate_memory(size_t pages, uint32_t type, drm_device_t *dev)
-{
- return (NULL);
-}
-
-/*ARGSUSED*/
-int
-drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev)
+/* LINTED */
+int drm_agp_free_memory(agp_allocate_t *handle, struct drm_device *dev)
{
- return (1);
+ return 1;
}
-/*ARGSUSED*/
-int
-drm_agp_bind_memory(unsigned int key, uint32_t start, drm_device_t *dev)
+int drm_agp_bind_memory(unsigned int key, uint32_t start, struct drm_device *dev)
{
agp_bind_t bind;
- int ret, rval;
+ int ret, rval;
bind.agpb_pgstart = start;
bind.agpb_key = key;
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_BIND,
- (intptr_t)&bind, FKIOCTL, kcred, &rval);
- if (ret) {
-		DRM_DEBUG("drm_agp_bind_memory: AGPIOC_BIND failed");
- return (EIO);
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_BIND,
+ (intptr_t)&bind, FKIOCTL, kcred, &rval)) {
+ DRM_DEBUG("AGPIOC_BIND failed");
+ return -ret;
}
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_agp_unbind_memory(unsigned long handle, drm_device_t *dev)
+int drm_agp_unbind_memory(unsigned long handle, struct drm_device *dev)
{
+ struct drm_agp_mem *entry;
agp_unbind_t unbind;
- drm_agp_mem_t *entry;
int ret, rval;
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
+ return -EINVAL;
- entry = drm_agp_lookup_entry(dev, (void *)handle);
+ entry = drm_agp_lookup_entry(dev, handle);
if (!entry || !entry->bound)
- return (EINVAL);
+ return -EINVAL;
unbind.agpu_pri = 0;
unbind.agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
- (intptr_t)&unbind, FKIOCTL, kcred, &rval);
- if (ret) {
-		DRM_ERROR("drm_agp_unbind: AGPIOC_UNBIND failed");
- return (EIO);
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
+ (intptr_t)&unbind, FKIOCTL, kcred, &rval)) {
+		DRM_ERROR("AGPIOC_UNBIND failed");
+ return -ret;
}
entry->bound = 0;
- return (0);
+ return 0;
}
-/*
+/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
@@ -505,83 +549,85 @@ drm_agp_unbind_memory(unsigned long handle, drm_device_t *dev)
* caller to handle that.
*/
int
-drm_agp_bind_pages(drm_device_t *dev,
+drm_agp_bind_pages(struct drm_device *dev,
pfn_t *pages,
unsigned long num_pages,
- uint32_t gtt_offset)
+ uint32_t gtt_offset,
+ unsigned int agp_type)
{
+ agp_gtt_info_t bind;
+ int ret, rval;
- agp_bind_pages_t bind;
- int ret, rval;
+ bind.agp_pgstart = gtt_offset / AGP_PAGE_SIZE;
+ bind.agp_npage = num_pages;
+ bind.agp_phyaddr = pages;
+ bind.agp_flags = agp_type;
- bind.agpb_pgstart = gtt_offset / AGP_PAGE_SIZE;
- bind.agpb_pgcount = num_pages;
- bind.agpb_pages = pages;
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_BIND,
(intptr_t)&bind, FKIOCTL, kcred, &rval);
if (ret) {
DRM_ERROR("AGPIOC_PAGES_BIND failed ret %d", ret);
- return (ret);
+ return -ret;
}
- return (0);
+ return 0;
}
int
-drm_agp_unbind_pages(drm_device_t *dev,
+drm_agp_unbind_pages(struct drm_device *dev,
+ pfn_t *pages,
unsigned long num_pages,
uint32_t gtt_offset,
+ pfn_t scratch,
uint32_t type)
{
+ agp_gtt_info_t unbind;
+ int ret, rval;
- agp_unbind_pages_t unbind;
- int ret, rval;
+ unbind.agp_pgstart = gtt_offset / AGP_PAGE_SIZE;
+ unbind.agp_npage = num_pages;
+ unbind.agp_type = type;
+ unbind.agp_phyaddr = pages;
+ unbind.agp_scratch = scratch;
- unbind.agpb_pgstart = gtt_offset / AGP_PAGE_SIZE;
- unbind.agpb_pgcount = num_pages;
- unbind.agpb_type = type;
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_UNBIND,
(intptr_t)&unbind, FKIOCTL, kcred, &rval);
if (ret) {
- DRM_DEBUG("drm_agp_unbind_pages AGPIOC_PAGES_UNBIND failed");
- return (ret);
+ DRM_ERROR("AGPIOC_PAGES_UNBIND failed %d", ret);
+ return -ret;
}
- return (0);
+ return 0;
}
-/*
- * Certain Intel chipsets contain a global write buffer, and this can require
- * flushing from the drm or X.org to make sure all data has hit RAM before
- * initiating a GPU transfer, due to a lack of coherency with the integrated
- * graphics device and this buffer.
- */
-void
-drm_agp_chipset_flush(struct drm_device *dev)
+void drm_agp_chipset_flush(struct drm_device *dev)
{
int ret, rval;
- DRM_DEBUG("agp_chipset_flush");
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_FLUSHCHIPSET,
(intptr_t)0, FKIOCTL, kcred, &rval);
- if (ret != 0) {
- DRM_ERROR("Failed to drm_agp_chipset_flush ret %d", ret);
- }
+ if (ret)
+ DRM_ERROR("AGPIOC_FLUSHCHIPSET failed, ret=%d", ret);
}
-/*
- * The pages are evicted on suspend, so re-bind them at resume time
- */
-void
-drm_agp_rebind(struct drm_device *dev)
+int
+drm_agp_rw_gtt(struct drm_device *dev,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ void *gttp,
+ uint32_t type)
{
+ agp_rw_gtt_t gtt_info;
int ret, rval;
- if (!dev->agp) {
- return;
- }
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_REBIND,
- (intptr_t)0, FKIOCTL, kcred, &rval);
- if (ret != 0) {
- DRM_ERROR("rebind failed %d", ret);
+ gtt_info.pgstart = gtt_offset / AGP_PAGE_SIZE;
+ gtt_info.pgcount = num_pages;
+ gtt_info.addr = gttp;
+ /* read = 0 write = 1 */
+ gtt_info.type = type;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RW_GTT,
+ (intptr_t)&gtt_info, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("AGPIOC_RW_GTT failed %d", ret);
+ return -ret;
}
+ return 0;
}
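
Every AGP operation in this file reaches agpgart(7D) through the Layered Driver Interface rather than a direct function call. A condensed sketch of the open/ioctl/close sequence drm_agp_init() performs (AGP_DEVICE and AGPIOC_INFO come from the agpgart headers; the helper name is illustrative):

	static int
	agp_query(dev_info_t *dip, agp_info_t *info)
	{
		ldi_ident_t li;
		ldi_handle_t lh;
		int ret, rval;

		if ((ret = ldi_ident_from_dip(dip, &li)) != 0)
			return (ret);
		if ((ret = ldi_open_by_name(AGP_DEVICE, FEXCL, kcred,
		    &lh, li)) != 0) {
			ldi_ident_release(li);
			return (ret);
		}
		ret = ldi_ioctl(lh, AGPIOC_INFO, (intptr_t)info,
		    FKIOCTL, kcred, &rval);
		(void) ldi_close(lh, FEXCL, kcred);
		ldi_ident_release(li);
		return (ret);
	}
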
diff --git a/usr/src/uts/common/io/drm/drm_auth.c b/usr/src/uts/common/io/drm/drm_auth.c
index 23fec5e..c6c0a66 100644
--- a/usr/src/uts/common/io/drm/drm_auth.c
+++ b/usr/src/uts/common/io/drm/drm_auth.c
@@ -1,13 +1,22 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*/
/*
- * drm_auth.h -- IOCTLs for authentication -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
+
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -30,144 +39,199 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
-static int
-drm_hash_magic(drm_magic_t magic)
+/**
+ * Generate a hash key from a magic.
+ *
+ * \param magic magic.
+ * \return hash key.
+ *
+ * The key is the modulus of the hash table size, #DRM_HASH_SIZE, which must be
+ * a power of 2.
+ */
+static int drm_hash_magic(drm_magic_t magic)
{
- return (magic & (DRM_HASH_SIZE-1));
+ return magic & (DRM_HASH_SIZE - 1);
}
-drm_file_t *
-drm_find_file(drm_device_t *dev, drm_magic_t magic)
+/**
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches drm_device::magiclist, among all files with the same hash key,
+ * for the entry with the matching magic number, while holding the
+ * drm_device::struct_mutex lock.
+ */
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
{
- drm_file_t *retval = NULL;
- drm_magic_entry_t *pt;
- int hash;
+ struct drm_file *retval = NULL;
+ struct drm_magic_entry *pt;
+ int hash = drm_hash_magic(magic);
+ struct drm_device *dev = master->minor->dev;
- hash = drm_hash_magic(magic);
- for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
+ mutex_lock(&dev->struct_mutex);
+ for (pt = master->magiclist[hash].head; pt; pt = pt->next) {
if (pt->magic == magic) {
retval = pt->priv;
break;
}
}
-
- return (retval);
+ mutex_unlock(&dev->struct_mutex);
+ return retval;
}
-static int
-drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
+/**
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends to the linked list
+ * associated the magic number hash key in drm_device::magiclist, while holding
+ * the drm_device::struct_mutex lock.
+ */
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
+ drm_magic_t magic)
{
- int hash;
- drm_magic_entry_t *entry;
+ int hash;
+ struct drm_magic_entry *entry;
+ struct drm_device *dev = master->minor->dev;
+
+ DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
- entry = drm_alloc(sizeof (*entry), DRM_MEM_MAGIC);
+ entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
if (!entry)
- return (ENOMEM);
+ return -ENOMEM;
+ (void) memset(entry, 0, sizeof(*entry));
entry->magic = magic;
- entry->priv = priv;
- entry->next = NULL;
+ entry->priv = priv;
+ entry->next = NULL;
- DRM_LOCK();
- if (dev->magiclist[hash].tail) {
- dev->magiclist[hash].tail->next = entry;
- dev->magiclist[hash].tail = entry;
+ mutex_lock(&dev->struct_mutex);
+ if (master->magiclist[hash].tail) {
+ master->magiclist[hash].tail->next = entry;
+ master->magiclist[hash].tail = entry;
} else {
- dev->magiclist[hash].head = entry;
- dev->magiclist[hash].tail = entry;
+ master->magiclist[hash].head = entry;
+ master->magiclist[hash].tail = entry;
}
- DRM_UNLOCK();
+ mutex_unlock(&dev->struct_mutex);
- return (0);
+ return 0;
}
-int
-drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
+/**
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
- drm_magic_entry_t *prev = NULL;
- drm_magic_entry_t *pt;
- int hash;
+ struct drm_magic_entry *prev = NULL;
+ struct drm_magic_entry *pt;
+ int hash;
+ struct drm_device *dev = master->minor->dev;
- DRM_DEBUG("drm_remove_magic : %d", magic);
+ DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
- DRM_LOCK();
- for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
+ mutex_lock(&dev->struct_mutex);
+ for (pt = master->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) {
- if (dev->magiclist[hash].head == pt) {
- dev->magiclist[hash].head = pt->next;
+ if (master->magiclist[hash].head == pt) {
+ master->magiclist[hash].head = pt->next;
}
- if (dev->magiclist[hash].tail == pt) {
- dev->magiclist[hash].tail = prev;
+ if (master->magiclist[hash].tail == pt) {
+ master->magiclist[hash].tail = prev;
}
if (prev) {
prev->next = pt->next;
}
- DRM_UNLOCK();
+ mutex_unlock(&dev->struct_mutex);
drm_free(pt, sizeof (*pt), DRM_MEM_MAGIC);
- return (0);
+ return 0;
}
}
- DRM_UNLOCK();
+ mutex_unlock(&dev->struct_mutex);
- return (EINVAL);
+ return -EINVAL;
}
-/*ARGSUSED*/
-int
-drm_getmagic(DRM_IOCTL_ARGS)
+/**
+ * Get a unique magic number (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a resulting drm_auth structure.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * searches for a unique non-zero magic number and adds it, associating it
+ * with \p file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
+ */
+/* LINTED */
+int drm_getmagic(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
static drm_magic_t sequence = 0;
- drm_auth_t auth;
+ struct drm_auth *auth = data;
/* Find unique magic */
- if (fpriv->magic) {
- auth.magic = fpriv->magic;
+ if (file->magic) {
+ auth->magic = file->magic;
} else {
do {
int old = sequence;
- auth.magic = old+1;
- if (!atomic_cmpset_int(&sequence, old, auth.magic))
+ auth->magic = old + 1;
+ if (!atomic_cmpset_int(&sequence, old, auth->magic))
continue;
- } while (drm_find_file(dev, auth.magic));
- fpriv->magic = auth.magic;
- (void) drm_add_magic(dev, fpriv, auth.magic);
+ } while (drm_find_file(file->master, auth->magic));
+ file->magic = auth->magic;
+ (void) drm_add_magic(file->master, file, auth->magic);
}
+ DRM_DEBUG("%u\n", auth->magic);
- DRM_DEBUG("drm_getmagic: %u", auth.magic);
-
- DRM_COPYTO_WITH_RETURN((void *)data, &auth, sizeof (auth));
-
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_authmagic(DRM_IOCTL_ARGS)
+/**
+ * Authenticate with a magic.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_auth structure.
+ * \return zero if authentication succeeded, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \p arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
+ */
+/* LINTED */
+int drm_authmagic(DRM_IOCTL_ARGS)
{
- drm_auth_t auth;
- drm_file_t *file;
- DRM_DEVICE;
-
- DRM_COPYFROM_WITH_RETURN(&auth, (void *)data, sizeof (auth));
-
- if ((file = drm_find_file(dev, auth.magic))) {
- file->authenticated = 1;
- (void) drm_remove_magic(dev, auth.magic);
- return (0);
+ struct drm_auth *auth = data;
+ struct drm_file *file_priv;
+
+ DRM_DEBUG("%u\n", auth->magic);
+ if ((file_priv = drm_find_file(file->master, auth->magic))) {
+ file_priv->authenticated = 1;
+ (void) drm_remove_magic(file->master, auth->magic);
+ return 0;
}
- return (EINVAL);
+ return -EINVAL;
}
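
The two ioctls above implement the classic DRI authentication handshake: an unprivileged client fetches a magic number, passes it out of band to the master (typically the X server), and the master authenticates it on the client's behalf. In libdrm terms the flow is roughly as follows (file descriptors hypothetical):

	drm_magic_t magic;

	if (drmGetMagic(client_fd, &magic) == 0) {
		/* magic travels to the master over the DRI protocol */
		if (drmAuthMagic(master_fd, magic) == 0) {
			/* client_fd may now issue DRM_AUTH ioctls */
		}
	}
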
diff --git a/usr/src/uts/common/io/drm/drm_bufs.c b/usr/src/uts/common/io/drm/drm_bufs.c
index ec01d37..2023f1d 100644
--- a/usr/src/uts/common/io/drm/drm_bufs.c
+++ b/usr/src/uts/common/io/drm/drm_bufs.c
@@ -1,11 +1,21 @@
/*
- * drm_bufs.h -- Generic buffer template -*- linux-c -*-
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
+
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2012, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,323 +36,465 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
*/
#include "drmP.h"
-#include <gfx_private.h>
#include "drm_io32.h"
+#ifdef _LP64
+extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
+#define drm_smmap smmap64
+#elif defined(_SYSCALL32_IMPL) || defined(_ILP32)
+extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
+#define drm_smmap smmap32
+#else
+#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
+#endif
-#define PAGE_MASK (PAGE_SIZE-1)
-#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define round_page(x) (((x) + (PAGE_SIZE - 1)) & PAGE_MASK)
-/*
- * Compute order. Can be made faster.
- */
-int
-drm_order(unsigned long size)
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+ struct drm_local_map *map)
{
- int order = 0;
- unsigned long tmp = size;
-
- while (tmp >>= 1)
- order ++;
-
- if (size & ~(1 << order))
- ++order;
+ struct drm_map_list *entry;
+ list_for_each_entry(entry, struct drm_map_list, &dev->maplist, head) {
+ /*
+ * Because the kernel-userspace ABI is fixed at a 32-bit offset
+ * while PCI resources may live above that, we ignore the map
+ * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+ * It is assumed that each driver will have only one resource of
+ * each type.
+ */
+ if (!entry->map ||
+ map->type != entry->map->type ||
+ entry->master != dev->primary->master)
+ continue;
+ switch (map->type) {
+ case _DRM_SHM:
+ if (map->flags != _DRM_CONTAINS_LOCK)
+ break;
+ return entry;
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+ if ((entry->map->offset & 0xffffffff) ==
+ (map->offset & 0xffffffff))
+ return entry;
+ default: /* Make gcc happy */
+ ;
+ }
+ if (entry->map->offset == map->offset)
+ return entry;
+ }
- return (order);
+ return NULL;
}
-static inline drm_local_map_t *
-drm_find_map(drm_device_t *dev, u_offset_t offset, int type)
+int drm_map_handle(struct drm_device *dev, struct drm_map_list *list)
{
- drm_local_map_t *map;
+ int newid, ret;
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if ((map->type == type) && ((map->offset == offset) ||
- (map->flags == _DRM_CONTAINS_LOCK) &&
- (map->type == _DRM_SHM)))
- return (map);
- }
+ ret = idr_get_new_above(&dev->map_idr, list, 1, &newid);
+ if (ret < 0)
+ return ret;
- return (NULL);
+ list->user_token = newid << PAGE_SHIFT;
+ return 0;
}
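+
+/*
+ * Illustrative only: with PAGE_SHIFT == 12, the first list added gets
+ * newid == 1 and hence user_token == 0x1000, the next 0x2000, and so
+ * on.  The token doubles as the page-aligned, non-zero offset that
+ * userland later passes back as the mmap(2) offset.
+ */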
-int drm_addmap(drm_device_t *dev, unsigned long offset,
- unsigned long size, drm_map_type_t type,
- drm_map_flags_t flags, drm_local_map_t **map_ptr)
+/**
+ * Core function to create a range of memory available for mapping by a
+ * non-root process.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device *dev, unsigned long offset,
+ unsigned long size, enum drm_map_type type,
+ enum drm_map_flags flags,
+ struct drm_map_list ** maplist)
{
- drm_local_map_t *map;
- caddr_t kva;
- int retval;
-
- /*
- * Only allow shared memory to be removable since we only keep
- * enough book keeping information about shared memory to allow
- * for removal when processes fork.
- */
- if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
- return (EINVAL);
- if ((offset & PAGE_MASK) || (size & PAGE_MASK))
- return (EINVAL);
- if (offset + size < offset)
- return (EINVAL);
-
- /*
- * Check if this is just another version of a kernel-allocated
- * map, and just hand that back if so.
- */
- map = drm_find_map(dev, offset, type);
- if (map != NULL) {
- goto done;
- }
+ struct drm_local_map *map;
+ struct drm_map_list *list;
+ /* LINTED */
+ unsigned long user_token;
+ int ret;
- /*
- * Allocate a new map structure, fill it in, and do any
- * type-specific initialization necessary.
- */
- map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
- return (ENOMEM);
+ return -ENOMEM;
map->offset = offset;
map->size = size;
- map->type = type;
map->flags = flags;
+ map->type = type;
+
+ /* Only allow shared memory to be removable since we only keep enough
+ * book keeping information about shared memory to allow for removal
+ * when processes fork.
+ */
+ if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+ kfree(map, sizeof(*map));
+ return -EINVAL;
+ }
+ DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
+ (unsigned long long)map->offset, map->size, map->type);
+
+	/* Page-align _DRM_SHM maps. They are allocated here, so no
+	 * security hole is created by that, and it works around various
+	 * broken drivers that use a non-aligned quantity to map the
+	 * SAREA. --BenH
+	 */
+ if (map->type == _DRM_SHM)
+ map->size = PAGE_ALIGN(map->size);
+
+ if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+ kfree(map, sizeof(*map));
+ return -EINVAL;
+ }
+
+ if (map->offset + map->size < map->offset) {
+ kfree(map, sizeof(*map));
+ return -EINVAL;
+ }
+
+ map->mtrr = -1;
+ map->handle = NULL;
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
- retval = drm_ioremap(dev, map);
- if (retval)
- return (retval);
- break;
+ /* Some drivers preinitialize some maps, without the X Server
+ * needing to be aware of it. Therefore, we just return success
+ * when the server tries to create a duplicate map.
+ */
+ list = drm_find_matching_map(dev, map);
+ if (list != NULL) {
+ if (list->map->size != map->size) {
+ DRM_DEBUG("Matching maps of type %d with "
+ "mismatched sizes, (%ld vs %ld)\n",
+ map->type, map->size,
+ list->map->size);
+ list->map->size = map->size;
+ }
+ kfree(map, sizeof(struct drm_local_map));
+ *maplist = list;
+ return 0;
+ }
+
+ if (map->type == _DRM_REGISTERS) {
+ map->handle = ioremap(map->offset, map->size);
+ if (!map->handle) {
+ kfree(map, sizeof(struct drm_local_map));
+ return -ENOMEM;
+ }
+ }
+
+ break;
case _DRM_SHM:
- /*
- * ddi_umem_alloc() grants page-aligned memory. We needn't
- * handle alignment issue here.
- */
- map->handle = ddi_umem_alloc(map->size,
- DDI_UMEM_NOSLEEP, &map->drm_umem_cookie);
+ list = drm_find_matching_map(dev, map);
+ if (list != NULL) {
+ if(list->map->size != map->size) {
+ DRM_DEBUG("Matching maps of type %d with "
+ "mismatched sizes, (%ld vs %ld)\n",
+ map->type, map->size, list->map->size);
+ list->map->size = map->size;
+ }
+
+ kfree(map, sizeof(struct drm_local_map));
+ *maplist = list;
+ return 0;
+ }
+ map->handle = ddi_umem_alloc(map->size, DDI_UMEM_NOSLEEP, &map->umem_cookie);
+ DRM_DEBUG("%lu %p\n",
+ map->size, map->handle);
if (!map->handle) {
- DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- return (ENOMEM);
+ kfree(map, sizeof(struct drm_local_map));
+ return -ENOMEM;
}
- /*
- * record only low 32-bit of this handle, since 32-bit
- * user app is incapable of passing in 64bit offset when
- * doing mmap.
- */
map->offset = (uintptr_t)map->handle;
- map->offset &= 0xffffffffUL;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->lock.hw_lock != NULL) {
- ddi_umem_free(map->drm_umem_cookie);
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- return (EBUSY);
+ if (dev->primary->master->lock.hw_lock != NULL) {
+ ddi_umem_free(map->umem_cookie);
+ kfree(map, sizeof(struct drm_local_map));
+ return -EBUSY;
}
- dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
- map->dev_addr = map->handle;
+ break;
+ case _DRM_AGP: {
+ caddr_t kvaddr;
+
+ if (!drm_core_has_AGP(dev)) {
+ kfree(map, sizeof(struct drm_local_map));
+ return -EINVAL;
+ }
+
+ map->offset += dev->agp->base;
+ kvaddr = gfxp_alloc_kernel_space(map->size);
+ if (!kvaddr) {
+ DRM_ERROR("failed to alloc AGP aperture");
+ kfree(map, sizeof(struct drm_local_map));
+ return -EPERM;
+ }
+ gfxp_load_kernel_space(map->offset, map->size,
+ GFXP_MEMORY_WRITECOMBINED, kvaddr);
+ map->handle = (void *)(uintptr_t)kvaddr;
+ map->umem_cookie = gfxp_umem_cookie_init(map->handle, map->size);
+ if (!map->umem_cookie) {
+ DRM_ERROR("gfxp_umem_cookie_init() failed");
+ gfxp_unmap_kernel_space(map->handle, map->size);
+ kfree(map, sizeof(struct drm_local_map));
+ return (-ENOMEM);
+ }
+ break;
+ }
+ case _DRM_GEM:
+ DRM_ERROR("tried to addmap GEM object\n");
break;
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- return (EINVAL);
+ kfree(map, sizeof(struct drm_local_map));
+ return -EINVAL;
}
map->offset += (uintptr_t)dev->sg->virtual;
- map->handle = (void *)(uintptr_t)map->offset;
- map->dev_addr = dev->sg->virtual;
- map->dev_handle = dev->sg->dmah_sg->acc_hdl;
+ map->handle = (void *)map->offset;
+ map->umem_cookie = gfxp_umem_cookie_init(map->handle, map->size);
+ if (!map->umem_cookie) {
+ DRM_ERROR("gfxp_umem_cookie_init() failed");
+ kfree(map, sizeof(struct drm_local_map));
+ return (-ENOMEM);
+ }
break;
-
case _DRM_CONSISTENT:
DRM_ERROR("%d DRM_AGP_CONSISTENT", __LINE__);
- return (ENOTSUP);
- case _DRM_AGP:
- map->offset += dev->agp->base;
- kva = gfxp_map_kernel_space(map->offset, map->size,
- GFXP_MEMORY_WRITECOMBINED);
- if (kva == 0) {
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- cmn_err(CE_WARN,
- "drm_addmap: failed to map AGP aperture");
- return (ENOMEM);
- }
- map->handle = (void *)(uintptr_t)kva;
- map->dev_addr = kva;
- break;
+ kfree(map, sizeof(struct drm_local_map));
+ return -ENOTSUP;
default:
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- return (EINVAL);
+ kfree(map, sizeof(struct drm_local_map));
+ return -EINVAL;
}
- TAILQ_INSERT_TAIL(&dev->maplist, map, link);
+ list = kmalloc(sizeof(*list), GFP_KERNEL);
+ if (!list) {
+ if (map->type == _DRM_REGISTERS)
+ iounmap(map->handle);
+ kfree(map, sizeof(struct drm_local_map));
+ return -EINVAL;
+ }
+ (void) memset(list, 0, sizeof(*list));
+ list->map = map;
+
+ mutex_lock(&dev->struct_mutex);
+ list_add(&list->head, &dev->maplist, (caddr_t)list);
+
+ /* Assign a 32-bit handle */
+ /* We do it here so that dev->struct_mutex protects the increment */
+ user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+ map->offset;
+ ret = drm_map_handle(dev, list);
+ if (ret) {
+ if (map->type == _DRM_REGISTERS)
+ iounmap(map->handle);
+ kfree(map, sizeof(struct drm_local_map));
+ kfree(list, sizeof(struct drm_map_list));
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
-done:
- /* Jumped to, with lock held, when a kernel map is found. */
- *map_ptr = map;
+ mutex_unlock(&dev->struct_mutex);
- return (0);
+ if (!(map->flags & _DRM_DRIVER))
+ list->master = dev->primary->master;
+ *maplist = list;
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_addmap_ioctl(DRM_IOCTL_ARGS)
+int drm_addmap(struct drm_device *dev, unsigned long offset,
+ unsigned long size, enum drm_map_type type,
+ enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
- drm_map_t request;
- drm_local_map_t *map;
+ struct drm_map_list *list;
+ int rc;
+
+ rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+ if (!rc)
+ *map_ptr = list->map;
+ return rc;
+}
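+
+/*
+ * Sketch of typical in-kernel use (bar_base, bar_size and regs are
+ * illustrative names, not taken from this code):
+ *
+ *	struct drm_local_map *regs;
+ *
+ *	if (drm_addmap(dev, bar_base, bar_size,
+ *	    _DRM_REGISTERS, _DRM_DRIVER, &regs) == 0) {
+ *		...access the device through regs->handle...
+ *	}
+ */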
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a
+ * non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ */
+/* LINTED */
+int drm_addmap_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_map *map = data;
+ struct drm_map_list *maplist;
int err;
- DRM_DEVICE;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void *)data, sizeof (request32));
- request.offset = request32.offset;
- request.size = request32.size;
- request.type = request32.type;
- request.flags = request32.flags;
- request.mtrr = request32.mtrr;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
-
- err = drm_addmap(dev, request.offset, request.size, request.type,
- request.flags, &map);
-
- if (err != 0)
- return (err);
-
- request.offset = map->offset;
- request.size = map->size;
- request.type = map->type;
- request.flags = map->flags;
- request.mtrr = map->mtrr;
- request.handle = (uintptr_t)map->handle;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t request32;
- request32.offset = request.offset;
- request32.size = (uint32_t)request.size;
- request32.type = request.type;
- request32.flags = request.flags;
- request32.handle = request.handle;
- request32.mtrr = request.mtrr;
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request32, sizeof (request32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request, sizeof (request));
- return (0);
+ if (!(DRM_SUSER(credp) || map->type == _DRM_AGP || map->type == _DRM_SHM))
+ return -EPERM;
+
+ err = drm_addmap_core(dev, map->offset, map->size, map->type,
+ map->flags, &maplist);
+
+ if (err)
+ return err;
+
+	/* Avoid a warning on 64-bit: this cast isn't very nice, but the
+	 * API is already set, so it is too late to change it. */
+ map->handle = maplist->user_token;
+ return 0;
}
-void
-drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+/**
+ * Remove a map private from list and deallocate resources if the mapping
+ * isn't in use.
+ *
+ * Searches the map on drm_device::maplist, removes it from the list, see if
+ * its being used, and free any associate resource (such as MTRR's) if it's not
+ * being on use.
+ *
+ * \sa drm_addmap
+ */
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
- DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+ struct drm_map_list *r_list = NULL, *list_t;
+ /* LINTED */
+ drm_dma_handle_t dmah;
+ int found = 0;
+ /* LINTED */
+ struct drm_master *master;
+
+ /* Find the list entry for the map and remove it */
+ list_for_each_entry_safe(r_list, list_t, struct drm_map_list, &dev->maplist, head) {
+ if (r_list->map == map) {
+ master = r_list->master;
+ list_del(&r_list->head);
+ (void) idr_remove(&dev->map_idr,
+ r_list->user_token >> PAGE_SHIFT);
+ kfree(r_list, sizeof(struct drm_map_list));
+ found = 1;
+ break;
+ }
+ }
- TAILQ_REMOVE(&dev->maplist, map, link);
+ if (!found)
+ return -EINVAL;
switch (map->type) {
case _DRM_REGISTERS:
- drm_ioremapfree(map);
- break;
+ iounmap(map->handle);
/* FALLTHROUGH */
case _DRM_FRAME_BUFFER:
- drm_ioremapfree(map);
break;
case _DRM_SHM:
- ddi_umem_free(map->drm_umem_cookie);
+ ddi_umem_free(map->umem_cookie);
break;
case _DRM_AGP:
- /*
- * we mapped AGP aperture into kernel space in drm_addmap,
- * here, unmap them and release kernel virtual address space
- */
- gfxp_unmap_kernel_space(map->dev_addr, map->size);
+ gfxp_umem_cookie_destroy(map->umem_cookie);
+ gfxp_unmap_kernel_space(map->handle, map->size);
break;
-
case _DRM_SCATTER_GATHER:
+ gfxp_umem_cookie_destroy(map->umem_cookie);
break;
case _DRM_CONSISTENT:
break;
- default:
+ case _DRM_GEM:
+ DRM_ERROR("tried to rmmap GEM object\n");
break;
}
+ kfree(map, sizeof(struct drm_local_map));
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+ return 0;
}
-/*
- * Remove a map private from list and deallocate resources if the
- * mapping isn't in use.
- */
-/*ARGSUSED*/
-int
-drm_rmmap_ioctl(DRM_IOCTL_ARGS)
+int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
- DRM_DEVICE;
- drm_local_map_t *map;
- drm_map_t request;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void *)data, sizeof (drm_map_32_t));
- request.offset = request32.offset;
- request.size = request32.size;
- request.type = request32.type;
- request.flags = request32.flags;
- request.handle = request32.handle;
- request.mtrr = request32.mtrr;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_rmmap_locked(dev, map);
+ mutex_unlock(&dev->struct_mutex);
- DRM_LOCK();
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if (((uintptr_t)map->handle == (request.handle & 0xffffffff)) &&
- (map->flags & _DRM_REMOVABLE))
+ return ret;
+}
+
+/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly. Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about. This seems
+ * unlikely.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ */
+/* LINTED */
+int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_map *request = data;
+ struct drm_local_map *map = NULL;
+ struct drm_map_list *r_list;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(r_list, struct drm_map_list, &dev->maplist, head) {
+ if (r_list->map &&
+ r_list->user_token == (unsigned long)request->handle &&
+ r_list->map->flags & _DRM_REMOVABLE) {
+ map = r_list->map;
break;
+ }
+ }
+
+	/* The list has wrapped around to the head pointer, or it is empty
+	 * and we didn't find anything.
+	 */
+ if (list_empty(&dev->maplist) || !map) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
- /* No match found. */
- if (map == NULL) {
- DRM_UNLOCK();
- return (EINVAL);
+ /* Register and framebuffer maps are permanent */
+ if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
}
- drm_rmmap(dev, map);
- DRM_UNLOCK();
+ ret = drm_rmmap_locked(dev, map);
- return (0);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
-/*ARGSUSED*/
-static void
-drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+/* LINTED */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+ struct drm_buf_entry * entry)
{
int i;
@@ -353,51 +505,41 @@ drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
"drm_cleanup_buf_error: not implemented");
}
}
- drm_free(entry->seglist,
- entry->seg_count *
- sizeof (*entry->seglist), DRM_MEM_SEGS);
+ kfree(entry->seglist, entry->seg_count * sizeof (*entry->seglist));
+
entry->seg_count = 0;
}
if (entry->buf_count) {
for (i = 0; i < entry->buf_count; i++) {
if (entry->buflist[i].dev_private) {
- drm_free(entry->buflist[i].dev_private,
- entry->buflist[i].dev_priv_size,
- DRM_MEM_BUFS);
+ kfree(entry->buflist[i].dev_private,
+ entry->buflist[i].dev_priv_size);
}
}
- drm_free(entry->buflist,
- entry->buf_count *
- sizeof (*entry->buflist), DRM_MEM_BUFS);
- entry->buflist = NULL;
+ kfree(entry->buflist, entry->buf_count * sizeof (*entry->buflist));
+
entry->buf_count = 0;
}
}
-/*ARGSUSED*/
-int
-drm_markbufs(DRM_IOCTL_ARGS)
-{
- DRM_DEBUG("drm_markbufs");
- return (EINVAL);
-}
-
-/*ARGSUSED*/
-int
-drm_infobufs(DRM_IOCTL_ARGS)
-{
- DRM_DEBUG("drm_infobufs");
- return (EINVAL);
-}
-
-static int
-drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks, creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+/* LINTED */
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request, cred_t *credp)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_buf_t **temp_buflist;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -405,79 +547,117 @@ drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
int size;
int alignment;
int page_order;
+ int total;
int byte_count;
int i;
+ struct drm_buf **temp_buflist;
if (!dma)
- return (EINVAL);
+ return -EINVAL;
count = request->count;
order = drm_order(request->size);
size = 1 << order;
- alignment = (request->flags & _DRM_PAGE_ALIGN)
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request->agp_start;
- entry = &dma->bufs[order];
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
- /* No more than one allocation per order */
+ mutex_lock(&dev->struct_mutex);
+ entry = &dma->bufs[order];
if (entry->buf_count) {
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
}
- entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
- DRM_MEM_BUFS);
+ if (count < 0 || count > 4096) {
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -EINVAL;
+ }
+
+ entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
+ (void) memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
- buf->offset = (dma->byte_count + offset);
+ buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->pending = 0;
- buf->filp = NULL;
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->buf_priv_size;
- buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
- if (buf->dev_private == NULL) {
+ buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
+ (void) memset(buf->dev_private, 0, buf->dev_priv_size);
+
+ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
- temp_buflist = drm_alloc(
- (dma->buf_count + entry->buf_count) * sizeof (*dma->buflist),
- DRM_MEM_BUFS);
+ DRM_DEBUG("byte_count: %d\n", byte_count);
- if (temp_buflist == NULL) {
+ temp_buflist = kmalloc(
+ (dma->buf_count + entry->buf_count) *
+ sizeof(*dma->buflist), GFP_KERNEL);
+ if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- DRM_ERROR(" temp_buflist is NULL");
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
-
 	bcopy(dma->buflist, temp_buflist,
 	    dma->buf_count * sizeof (*dma->buflist));
kmem_free(dma->buflist, dma->buf_count *sizeof (*dma->buflist));
@@ -488,24 +668,29 @@ drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
}
dma->buf_count += entry->buf_count;
- dma->byte_count += byte_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+ mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
dma->flags = _DRM_DMA_USE_AGP;
- return (0);
+ atomic_dec(&dev->buf_alloc);
+ return 0;
}
-static int
-drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request, cred_t *credp)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -513,26 +698,73 @@ drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
int size;
int alignment;
int page_order;
+ int total;
int byte_count;
int i;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
+
+ if (!drm_core_check_feature(dev, DRIVER_SG))
+ return -EINVAL;
+
+ if (!dma)
+ return -EINVAL;
+
+ if (!DRM_SUSER(credp))
+ return -EPERM;
count = request->count;
order = drm_order(request->size);
size = 1 << order;
- alignment = (request->flags & _DRM_PAGE_ALIGN)
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = request->agp_start;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ if (count < 0 || count > 4096) {
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -EINVAL;
+ }
- entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
- DRM_MEM_BUFS);
- if (entry->buflist == NULL)
- return (ENOMEM);
+ entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+ GFP_KERNEL);
+ if (!entry->buflist) {
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ (void) memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -540,41 +772,51 @@ drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
offset = 0;
while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
- buf->offset = (dma->byte_count + offset);
+ buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->sg->handle);
- buf->next = NULL;
- buf->pending = 0;
- buf->filp = NULL;
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->buf_priv_size;
- buf->dev_private = drm_alloc(buf->dev_priv_size,
- DRM_MEM_BUFS);
- if (buf->dev_private == NULL) {
+ buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
+ (void) memset(buf->dev_private, 0, buf->dev_priv_size);
+
+ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
temp_buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof (*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof (*dma->buflist), DRM_MEM_BUFS);
if (!temp_buflist) {
+ /* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
dma->buflist = temp_buflist;
@@ -584,314 +826,386 @@ drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
+
+ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+ mutex_unlock(&dev->struct_mutex);
+
request->count = entry->buf_count;
request->size = size;
+
dma->flags = _DRM_DMA_USE_SG;
- return (0);
+ atomic_dec(&dev->buf_alloc);
+ return 0;
}
-int
-drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * According to the memory type specified in drm_buf_desc::flags, it
+ * dispatches the call to drm_addbufs_agp() or drm_addbufs_sg(), for AGP
+ * or scatter-gather memory respectively.
+ */
+/* LINTED */
+int drm_addbufs(DRM_IOCTL_ARGS)
{
- int order, ret;
-
- DRM_SPINLOCK(&dev->dma_lock);
-
- if (request->count < 0 || request->count > 4096) {
- DRM_SPINLOCK(&dev->dma_lock);
- return (EINVAL);
- }
+ struct drm_buf_desc *request = data;
+ int ret = -EINVAL;
- order = drm_order(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
- DRM_SPINLOCK(&dev->dma_lock);
- return (EINVAL);
- }
-
- /* No more allocations after first buffer-using ioctl. */
- if (dev->buf_use != 0) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (EBUSY);
- }
- /* No more than one allocation per order */
- if (dev->dma->bufs[order].buf_count != 0) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (ENOMEM);
- }
-
- ret = drm_do_addbufs_agp(dev, request);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
- DRM_SPINUNLOCK(&dev->dma_lock);
+ if (request->flags & _DRM_AGP_BUFFER)
+ ret = drm_addbufs_agp(dev, request, credp);
+ else
+ if (request->flags & _DRM_SG_BUFFER)
+ ret = drm_addbufs_sg(dev, request, credp);
- return (ret);
+ return ret;
}
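+
+/*
+ * Sketch of the corresponding userland request, assuming an AGP
+ * aperture was set up earlier (all values illustrative):
+ *
+ *	struct drm_buf_desc desc;
+ *
+ *	(void) memset(&desc, 0, sizeof (desc));
+ *	desc.count = 32;
+ *	desc.size = 65536;
+ *	desc.flags = _DRM_AGP_BUFFER;
+ *	desc.agp_start = 0;
+ *	(void) ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
+ */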
-int
-drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
+/**
+ * Get information about the buffer mappings.
+ *
+ * This was originally meant for debugging purposes, or for use by a
+ * sophisticated client library to determine how best to use the available
+ * buffers (e.g., large buffers can be used for image transfer).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * lock, preventing further buffer allocation after this call. Information
+ * about each requested buffer is then copied into user space.
+ */
+/* LINTED */
+int drm_infobufs(DRM_IOCTL_ARGS)
{
- int order, ret;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_info *request = data;
+ int i;
+ int count;
- DRM_SPINLOCK(&dev->dma_lock);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
- if (request->count < 0 || request->count > 4096) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (EINVAL);
- }
+ if (!dma)
+ return -EINVAL;
- order = drm_order(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (EINVAL);
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
}
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
- /* No more allocations after first buffer-using ioctl. */
- if (dev->buf_use != 0) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (EBUSY);
+ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+ if (dma->bufs[i].buf_count)
+ ++count;
}
- /* No more than one allocation per order */
- if (dev->dma->bufs[order].buf_count != 0) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (ENOMEM);
+ DRM_DEBUG("count = %d\n", count);
+
+ if (request->count >= count) {
+ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+ if (dma->bufs[i].buf_count) {
+ struct drm_buf_desc __user *to =
+ &request->list[count];
+ struct drm_buf_entry *from = &dma->bufs[i];
+ struct drm_freelist *list = &dma->bufs[i].freelist;
+ if (copy_to_user(&to->count,
+ &from->buf_count,
+ sizeof(from->buf_count)) ||
+ copy_to_user(&to->size,
+ &from->buf_size,
+ sizeof(from->buf_size)) ||
+ copy_to_user(&to->low_mark,
+ &list->low_mark,
+ sizeof(list->low_mark)) ||
+ copy_to_user(&to->high_mark,
+ &list->high_mark,
+ sizeof(list->high_mark)))
+ return -EFAULT;
+
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark);
+ ++count;
+ }
+ }
}
+ request->count = count;
- ret = drm_do_addbufs_sg(dev, request);
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (ret);
+ return 0;
}
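+
+/*
+ * The usual two-step query from userland, sketched (illustrative):
+ * call once with count == 0 to learn how many entries exist, then
+ * again with a list large enough to receive them:
+ *
+ *	struct drm_buf_info info = { 0, NULL };
+ *
+ *	(void) ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
+ *	info.list = calloc(info.count, sizeof (struct drm_buf_desc));
+ *	(void) ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
+ */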
-/*ARGSUSED*/
-int
-drm_addbufs_ioctl(DRM_IOCTL_ARGS)
+/**
+ * Specifies a low and high water mark for buffer allocation
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order is bounded between the admissible orders and
+ * updates the respective drm_device_dma::bufs entry low and high water mark.
+ *
+ * \note This ioctl is deprecated and mostly never used.
+ */
+/* LINTED */
+int drm_markbufs(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_buf_desc_t request;
- int err;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_desc *request = data;
+ int order;
+ struct drm_buf_entry *entry;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_buf_desc_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void *)data, sizeof (request32));
- request.count = request32.count;
- request.size = request32.size;
- request.low_mark = request32.low_mark;
- request.high_mark = request32.high_mark;
- request.flags = request32.flags;
- request.agp_start = request32.agp_start;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
-
- if (request.flags & _DRM_AGP_BUFFER)
- err = drm_addbufs_agp(dev, &request);
- else if (request.flags & _DRM_SG_BUFFER)
- err = drm_addbufs_sg(dev, &request);
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_buf_desc_32_t request32;
- request32.count = request.count;
- request32.size = request.size;
- request32.low_mark = request.low_mark;
- request32.high_mark = request.high_mark;
- request32.flags = request.flags;
- request32.agp_start = (uint32_t)request.agp_start;
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request32, sizeof (request32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request, sizeof (request));
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
+
+ if (!dma)
+ return -EINVAL;
- return (err);
+ DRM_DEBUG("%d, %d, %d\n",
+ request->size, request->low_mark, request->high_mark);
+ order = drm_order(request->size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return -EINVAL;
+ entry = &dma->bufs[order];
+
+ if (request->low_mark < 0 || request->low_mark > entry->buf_count)
+ return -EINVAL;
+ if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+ return -EINVAL;
+
+ entry->freelist.low_mark = request->low_mark;
+ entry->freelist.high_mark = request->high_mark;
+
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_freebufs(DRM_IOCTL_ARGS)
+/**
+ * Unreserve the buffers in the list, previously reserved using drmDMA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_free structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls drm_free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+/* LINTED */
+int drm_freebufs(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_free_t request;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_free *request = data;
int i;
int idx;
- drm_buf_t *buf;
- int retcode = 0;
+ struct drm_buf *buf;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_buf_free_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void*)data, sizeof (request32));
- request.count = request32.count;
- request.list = (int *)(uintptr_t)request32.list;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
- for (i = 0; i < request.count; i++) {
- if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
- retcode = EFAULT;
- break;
- }
+ if (!dma)
+ return -EINVAL;
+
+ DRM_DEBUG("%d\n", request->count);
+ for (i = 0; i < request->count; i++) {
+ if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof (idx)))
+ return -EFAULT;
if (idx < 0 || idx >= dma->buf_count) {
- DRM_ERROR("drm_freebufs: Index %d (of %d max)\n",
- idx, dma->buf_count - 1);
- retcode = EINVAL;
- break;
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ return -EINVAL;
}
buf = dma->buflist[idx];
- if (buf->filp != fpriv) {
- DRM_ERROR(
- "drm_freebufs: process %d not owning the buffer.\n",
+ if (buf->file_priv != file) {
+ DRM_ERROR("Process %d freeing buffer not owned\n",
DRM_CURRENTPID);
- retcode = EINVAL;
- break;
+ return -EINVAL;
}
drm_free_buffer(dev, buf);
}
- return (retcode);
+ return 0;
}
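+
+/*
+ * Userland sketch (buffer indices illustrative): returning two
+ * previously reserved buffers by index:
+ *
+ *	int idx[2] = { 3, 7 };
+ *	struct drm_buf_free req = { 2, idx };
+ *
+ *	(void) ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
+ */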
-#ifdef _LP64
-extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
-#define drm_smmap smmap64
-#else
-#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
-extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
-#define drm_smmap smmap32
-#else
-#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
-#endif
-#endif
-
-
-/*ARGSUSED*/
-int
-drm_mapbufs(DRM_IOCTL_ARGS)
+/**
+ * Maps all of the DMA buffers into client-virtual space (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Maps the AGP, SG or PCI buffer region with drm_smmap(), and copies
+ * information about each buffer into user space. For PCI buffers, the map
+ * is done with offset equal to 0, which drm_mmap() interprets as PCI
+ * buffers and calls drm_mmap_dma().
+ */
+/* LINTED */
+int drm_mapbufs(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_buf_map_t request;
+ struct drm_device_dma *dma = dev->dma;
+ int retcode = 0;
const int zero = 0;
- unsigned long vaddr;
+ unsigned long virtual;
unsigned long address;
- drm_device_dma_t *dma = dev->dma;
- uint_t size;
- uint_t foff;
- int ret_tmp;
- int i;
-
-#ifdef _MULTI_DATAMODEL
- drm_buf_map_32_t request32;
- drm_buf_pub_32_t *list32;
- uint_t address32;
-
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void *)data, sizeof (request32));
- request.count = request32.count;
- request.virtual = (void *)(uintptr_t)request32.virtual;
- request.list = (drm_buf_pub_t *)(uintptr_t)request32.list;
- request.fd = request32.fd;
- } else
+ struct drm_buf_map *request = data;
+ int i;
+ uint_t size, foff;
+
+#ifdef _MULTI_DATAMODEL
+ struct drm_buf_pub_32 *list32;
+ uint_t address32;
#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
-
- dev->buf_use++;
-
- if (request.count < dma->buf_count)
- goto done;
-
- if ((dev->driver->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
- (dev->driver->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
- drm_local_map_t *map = dev->agp_buffer_map;
- if (map == NULL)
- return (EINVAL);
- size = round_page(map->size);
- foff = (uintptr_t)map->handle;
- } else {
- size = round_page(dma->byte_count);
- foff = 0;
- }
- request.virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED, request.fd, foff);
- if (request.virtual == NULL) {
- DRM_ERROR("drm_mapbufs: request.virtual is NULL");
- return (EINVAL);
- }
-
- vaddr = (unsigned long) request.virtual;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- list32 = (drm_buf_pub_32_t *)(uintptr_t)request32.list;
- for (i = 0; i < dma->buf_count; i++) {
- if (DRM_COPY_TO_USER(&list32[i].idx,
- &dma->buflist[i]->idx, sizeof (list32[0].idx))) {
- return (EFAULT);
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
+
+ if (!dma)
+ return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ dev->buf_use++; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ if (request->count >= dma->buf_count) {
+ if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+ || (drm_core_check_feature(dev, DRIVER_SG)
+ && (dma->flags & _DRM_DMA_USE_SG))) {
+ struct drm_local_map *map = dev->agp_buffer_map;
+
+ if (!map) {
+ retcode = -EINVAL;
+ goto done;
}
- if (DRM_COPY_TO_USER(&list32[i].total,
- &dma->buflist[i]->total,
- sizeof (list32[0].total))) {
- return (EFAULT);
+ size = round_page(map->size);
+ foff = (uintptr_t)map->handle;
+ } else {
+ size = round_page(dma->byte_count);
+ foff = 0;
+ }
+ request->virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, request->fd, foff);
+ if (request->virtual == NULL) {
+ DRM_ERROR("request->virtual is NULL");
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ virtual = (unsigned long) request->virtual;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(ioctl_mode & FMODELS) == DDI_MODEL_ILP32) {
+ list32 = (drm_buf_pub_32_t *)(uintptr_t)request->list;
+ for (i = 0; i < dma->buf_count; i++) {
+ if (DRM_COPY_TO_USER(&list32[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof (list32[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&list32[i].total,
+ &dma->buflist[i]->total,
+ sizeof (list32[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&list32[i].used,
+ &zero, sizeof (zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address32 = virtual + dma->buflist[i]->offset; /* *** */
+ if (DRM_COPY_TO_USER(&list32[i].address,
+ &address32, sizeof (list32[0].address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
}
- if (DRM_COPY_TO_USER(&list32[i].used,
- &zero, sizeof (zero))) {
- return (EFAULT);
+ } else {
+#endif
+ for (i = 0; i < dma->buf_count; i++) {
+ if (DRM_COPY_TO_USER(&request->list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof (request->list[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&request->list[i].total,
+ &dma->buflist[i]->total,
+ sizeof (request->list[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
+ sizeof (zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset; /* *** */
+
+ if (DRM_COPY_TO_USER(&request->list[i].address,
+ &address, sizeof (address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
}
- address32 = vaddr + dma->buflist[i]->offset; /* *** */
- ret_tmp = DRM_COPY_TO_USER(&list32[i].address,
- &address32, sizeof (list32[0].address));
- if (ret_tmp)
- return (EFAULT);
+#ifdef _MULTI_DATAMODEL
}
- goto done;
- }
#endif
+ }
+ done:
+ request->count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
- ASSERT(ddi_model_convert_from(mode & FMODELS) != DDI_MODEL_ILP32);
- for (i = 0; i < dma->buf_count; i++) {
- if (DRM_COPY_TO_USER(&request.list[i].idx,
- &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
- return (EFAULT);
- }
- if (DRM_COPY_TO_USER(&request.list[i].total,
- &dma->buflist[i]->total, sizeof (request.list[0].total))) {
- return (EFAULT);
- }
- if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
- sizeof (zero))) {
- return (EFAULT);
- }
- address = vaddr + dma->buflist[i]->offset; /* *** */
+ return retcode;
+}
- ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
- &address, sizeof (address));
- if (ret_tmp) {
- return (EFAULT);
- }
- }
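+
+/*
+ * Userland counterpart, sketched (roughly what libdrm's drmMapBufs()
+ * does; error handling omitted, names illustrative):
+ *
+ *	struct drm_buf_map bm;
+ *
+ *	(void) memset(&bm, 0, sizeof (bm));
+ *	bm.count = buf_count;	(from a prior DRM_IOCTL_INFO_BUFS)
+ *	bm.list = calloc(bm.count, sizeof (struct drm_buf_pub));
+ *	bm.fd = fd;
+ *	(void) ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);
+ *
+ * On success, each bm.list[i].address points into the shared mapping.
+ */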
+/**
+ * Compute size order. Returns the exponent of the smallest power of two
+ * which is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order(unsigned long size)
+{
+ int order;
+ unsigned long tmp;
-done:
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- request32.count = dma->buf_count;
- request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request32, sizeof (request32));
- } else {
-#endif
- request.count = dma->buf_count;
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request, sizeof (request));
-#ifdef _MULTI_DATAMODEL
- }
-#endif
- return (0);
+ for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+
+ if (size & (size - 1))
+ ++order;
+
+ return order;
}
+
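+/*
+ * Worked examples of the definition above:
+ *
+ *	drm_order(1)    == 0	(1 << 0  == 1)
+ *	drm_order(4096) == 12	(1 << 12 == 4096)
+ *	drm_order(4097) == 13	(next power of two is 8192)
+ */
+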
diff --git a/usr/src/uts/common/io/drm/drm_cache.c b/usr/src/uts/common/io/drm/drm_cache.c
index fe7eff0..1f58fee 100644
--- a/usr/src/uts/common/io/drm/drm_cache.c
+++ b/usr/src/uts/common/io/drm/drm_cache.c
@@ -1,7 +1,10 @@
/*
- *
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
* Copyright(c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2012, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,10 +32,6 @@
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- */
-
#include <sys/x86_archext.h>
#include <vm/seg_kmem.h>
#include "drmP.h"
diff --git a/usr/src/uts/common/io/drm/drm_context.c b/usr/src/uts/common/io/drm/drm_context.c
index 16c141f..a9d783a 100644
--- a/usr/src/uts/common/io/drm/drm_context.c
+++ b/usr/src/uts/common/io/drm/drm_context.c
@@ -1,13 +1,22 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
/*
- * drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ * Copyright (c) 2006, 2013 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
*/
+
/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -30,418 +39,437 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * ChangeLog:
+ * 2001-11-16 Torsten Duwe <duwe@caldera.de>
+ * added context constructor/destructor hooks,
+ * needed by SiS driver's memory management.
+ */
#include "drmP.h"
#include "drm_io32.h"
-static inline int
-find_first_zero_bit(volatile void *p, int max)
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Removes the entry specified by \p ctx_handle from drm_device::ctx_idr
+ * while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
- int b;
- volatile int *ptr = (volatile int *)p;
-
- for (b = 0; b < max; b += 32) {
- if (ptr[b >> 5] != ~0) {
- for (;;) {
- if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
- return (b);
- b++;
- }
- }
- }
- return (max);
+ mutex_lock(&dev->struct_mutex);
+ (void) idr_remove(&dev->ctx_idr, ctx_handle);
+ mutex_unlock(&dev->struct_mutex);
}
-/*
- * Context bitmap support
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocate a new id from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
*/
-void
-drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
+static int drm_ctxbitmap_next(struct drm_device * dev)
{
- if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
- dev->ctx_bitmap == NULL) {
- DRM_ERROR("drm_ctxbitmap_free: Attempt to free\
- invalid context handle: %d\n",
- ctx_handle);
- return;
- }
+ int new_id;
+ int ret;
- DRM_LOCK();
- clear_bit(ctx_handle, dev->ctx_bitmap);
- dev->context_sareas[ctx_handle] = NULL;
- DRM_UNLOCK();
+again:
+ if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Out of memory expanding context idr\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&dev->struct_mutex);
+ ret = idr_get_new_above(&dev->ctx_idr, NULL,
+ DRM_RESERVED_CONTEXTS, &new_id);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+
+ return new_id;
}
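+
+/*
+ * Sketch of the expected pairing in a caller such as drm_addctx()
+ * (illustrative; error handling elided):
+ *
+ *	int handle = drm_ctxbitmap_next(dev);
+ *
+ *	if (handle < 0)
+ *		return handle;
+ *	...
+ *	drm_ctxbitmap_free(dev, handle);
+ */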
-/* Is supposed to return -1 if any error by calling functions */
-int
-drm_ctxbitmap_next(drm_device_t *dev)
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Initialise the drm_device::ctx_idr
+ */
+int drm_ctxbitmap_init(struct drm_device * dev)
{
- int bit;
+ idr_init(&dev->ctx_idr);
+ return 0;
+}
- if (dev->ctx_bitmap == NULL)
- return (-1);
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Free all idr members while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
+{
+ mutex_lock(&dev->struct_mutex);
+ idr_remove_all(&dev->ctx_idr);
+ mutex_unlock(&dev->struct_mutex);
+}
- DRM_LOCK();
- bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
- if (bit >= DRM_MAX_CTXBITMAP) {
- DRM_UNLOCK();
- return (-1);
- }
+/*@}*/
- set_bit(bit, dev->ctx_bitmap);
- DRM_DEBUG("drm_ctxbitmap_next: bit : %d", bit);
- if ((bit+1) > dev->max_context) {
- dev->max_context = (bit+1);
- if (dev->context_sareas != NULL) {
- drm_local_map_t **ctx_sareas;
- ctx_sareas = drm_realloc(dev->context_sareas,
- (dev->max_context - 1) *
- sizeof (*dev->context_sareas),
- dev->max_context *
- sizeof (*dev->context_sareas),
- DRM_MEM_MAPS);
- if (ctx_sareas == NULL) {
- clear_bit(bit, dev->ctx_bitmap);
- DRM_UNLOCK();
- return (-1);
- }
- dev->context_sareas = ctx_sareas;
- dev->context_sareas[bit] = NULL;
- } else {
- /* max_context == 1 at this point */
- dev->context_sareas = drm_alloc(dev->max_context *
- sizeof (*dev->context_sareas), KM_NOSLEEP);
- if (dev->context_sareas == NULL) {
- clear_bit(bit, dev->ctx_bitmap);
- DRM_UNLOCK();
- return (-1);
- }
- dev->context_sareas[bit] = NULL;
- }
- }
- DRM_UNLOCK();
- DRM_DEBUG("drm_ctxbitmap_next: return %d", bit);
- return (bit);
-}
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
-int
-drm_ctxbitmap_init(drm_device_t *dev)
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Looks up the map in drm_device::ctx_idr for the given context id and
+ * returns its user token as the handle.
+ */
+/* LINTED */
+int drm_getsareactx(DRM_IOCTL_ARGS)
{
- int i;
- int temp;
+ struct drm_ctx_priv_map *request = data;
+ struct drm_local_map *map;
+ struct drm_map_list *_entry;
- DRM_LOCK();
- dev->ctx_bitmap = drm_calloc(1, DRM_PAGE_SIZE, DRM_MEM_CTXBITMAP);
- if (dev->ctx_bitmap == NULL) {
- DRM_UNLOCK();
- return (ENOMEM);
+ mutex_lock(&dev->struct_mutex);
+
+ map = idr_find(&dev->ctx_idr, request->ctx_id);
+ if (!map) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
- dev->context_sareas = NULL;
- dev->max_context = -1;
- DRM_UNLOCK();
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- temp = drm_ctxbitmap_next(dev);
- DRM_DEBUG("drm_ctxbitmap_init : %d", temp);
+ request->handle = NULL;
+ list_for_each_entry(_entry, struct drm_map_list, &dev->maplist, head) {
+ if (_entry->map == map) {
+ request->handle =
+ (void *)(unsigned long)_entry->user_token;
+ break;
+ }
}
- return (0);
-}
-void
-drm_ctxbitmap_cleanup(drm_device_t *dev)
-{
- DRM_LOCK();
- if (dev->context_sareas != NULL)
- drm_free(dev->context_sareas,
- sizeof (*dev->context_sareas) *
- dev->max_context,
- DRM_MEM_MAPS);
- drm_free(dev->ctx_bitmap, DRM_PAGE_SIZE, DRM_MEM_CTXBITMAP);
- DRM_UNLOCK();
+ mutex_unlock(&dev->struct_mutex);
+
+ if (request->handle == NULL)
+ return -EINVAL;
+
+ return 0;
}
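+
+/*
+ * Userland sketch (illustrative): fetching the SAREA map token for a
+ * context, to be handed to mmap(2):
+ *
+ *	struct drm_ctx_priv_map pm;
+ *
+ *	pm.ctx_id = ctx;
+ *	(void) ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &pm);
+ *	(pm.handle now holds the map token)
+ */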
-/*
- * Per Context SAREA Support
+/**
+ * Set per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping specified in \p arg and updates the entry in
+ * drm_device::ctx_idr with it.
*/
-/*ARGSUSED*/
-int
-drm_getsareactx(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_setsareactx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_priv_map_t request;
- drm_local_map_t *map;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_priv_map_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
- sizeof (drm_ctx_priv_map_32_t));
- request.ctx_id = request32.ctx_id;
- request.handle = (void *)(uintptr_t)request32.handle;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
- sizeof (request));
-
- DRM_LOCK();
- if (dev->max_context < 0 || request.ctx_id >= (unsigned)
- dev->max_context) {
- DRM_UNLOCK();
- return (EINVAL);
+ struct drm_ctx_priv_map *request = data;
+ struct drm_local_map *map = NULL;
+ struct drm_map_list *r_list = NULL;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(r_list, struct drm_map_list, &dev->maplist, head) {
+ if (r_list->map
+ && r_list->user_token == (unsigned long) request->handle)
+ goto found;
}
+ bad:
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
- map = dev->context_sareas[request.ctx_id];
- DRM_UNLOCK();
-
+ found:
+ map = r_list->map;
if (!map)
- return (EINVAL);
-
- request.handle = map->handle;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_priv_map_32_t request32;
- request32.ctx_id = request.ctx_id;
- request32.handle = (caddr32_t)(uintptr_t)request.handle;
- DRM_COPYTO_WITH_RETURN((void *)data, &request32,
- sizeof (drm_ctx_priv_map_32_t));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request, sizeof (request));
-
- return (0);
-}
+ goto bad;
-/*ARGSUSED*/
-int
-drm_setsareactx(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_ctx_priv_map_t request;
- drm_local_map_t *map = NULL;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_priv_map_32_t request32;
-
- DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
- sizeof (drm_ctx_priv_map_32_t));
- request.ctx_id = request32.ctx_id;
- request.handle = (void *)(uintptr_t)request32.handle;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
-
- DRM_LOCK();
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if (map->handle == request.handle) {
- if (dev->max_context < 0)
- goto bad;
- if (request.ctx_id >= (unsigned)dev->max_context)
- goto bad;
- dev->context_sareas[request.ctx_id] = map;
- DRM_UNLOCK();
- return (0);
- }
- }
+ if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
+ goto bad;
+
+ mutex_unlock(&dev->struct_mutex);
-bad:
- DRM_UNLOCK();
- return (EINVAL);
+ return 0;
}
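These two handlers are normally exercised as a pair from userspace: SET_SAREA_CTX binds an existing map to a context id, and GET_SAREA_CTX reads the binding back. A minimal sketch of the raw ioctl usage (not part of this import; the map handle is assumed to come from an earlier DRM_IOCTL_ADD_MAP, and error handling is elided):

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Bind an existing map to a context, then read the binding back. */
    static int bind_and_query_sarea(int fd, unsigned int ctx_id, void *map_handle)
    {
        struct drm_ctx_priv_map req = { .ctx_id = ctx_id, .handle = map_handle };

        if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &req) != 0)
            return -1;      /* no map with that user_token, or idr_replace() failed */

        req.handle = NULL;
        if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &req) != 0)
            return -1;      /* ctx_id not present in ctx_idr */

        return (req.handle == map_handle) ? 0 : -1;
    }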
-/*
- * The actual DRM context handling routines
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
*/
-int
-drm_context_switch(drm_device_t *dev, int old, int new)
+static int drm_context_switch(struct drm_device * dev, int old, int new)
{
if (test_and_set_bit(0, &dev->context_flag)) {
- DRM_ERROR("drm_context_switch: Reentering -- FIXME");
- return (EBUSY);
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
}
- DRM_DEBUG("drm_context_switch: Context switch from %d to %d",
- old, new);
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
- return (0);
+ return 0;
}
- return (0);
+ return 0;
}
-int
-drm_context_switch_complete(drm_device_t *dev, int new)
+/**
+ * Complete context switch.
+ *
+ * \param dev DRM device.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
+ * hardware lock is held, clears the drm_device::context_flag and wakes up
+ * drm_device::context_wait.
+ */
+static int drm_context_switch_complete(struct drm_device *dev,
+ struct drm_file *file_priv, int new)
{
- dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR(
- "drm_context_switch_complete: Lock not held");
+ if (file_priv->master->lock.hw_lock != NULL &&
+ !_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
}
- /*
- * If a context switch is ever initiated
- * when the kernel holds the lock, release
- * that lock here.
- */
+
+ /*
+ * If a context switch is ever initiated when the kernel holds the
+ * lock, release that lock here.
+ */
clear_bit(0, &dev->context_flag);
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_resctx(DRM_IOCTL_ARGS)
+/**
+ * Reserve contexts.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+/* LINTED */
+int drm_resctx(DRM_IOCTL_ARGS)
{
- drm_ctx_res_t res;
- drm_ctx_t ctx;
+ struct drm_ctx_res *res = data;
+ struct drm_ctx ctx;
int i;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_res_32_t res32;
- DRM_COPYFROM_WITH_RETURN(&res32, (void *)data, sizeof (res32));
- res.count = res32.count;
- res.contexts = (drm_ctx_t *)(uintptr_t)res32.contexts;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&res, (void *)data, sizeof (res));
-
- if (res.count >= DRM_RESERVED_CONTEXTS) {
- bzero(&ctx, sizeof (ctx));
+ if (res->count >= DRM_RESERVED_CONTEXTS) {
+ (void) memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
- DRM_COPYTO_WITH_RETURN(&res.contexts[i],
+ DRM_COPYTO_WITH_RETURN(&res->contexts[i],
&ctx, sizeof (ctx));
}
}
- res.count = DRM_RESERVED_CONTEXTS;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_res_32_t res32;
- res32.count = res.count;
- res32.contexts = (caddr32_t)(uintptr_t)res.contexts;
+ res->count = DRM_RESERVED_CONTEXTS;
- DRM_COPYTO_WITH_RETURN((void *)data, &res32,
- sizeof (drm_ctx_res_32_t));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &res, sizeof (res));
-
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_addctx(DRM_IOCTL_ARGS)
+/**
+ * Add context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+/* LINTED */
+int drm_addctx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ struct drm_ctx_list *ctx_entry;
+ struct drm_ctx *ctx = data;
- ctx.handle = drm_ctxbitmap_next(dev);
- if (ctx.handle == DRM_KERNEL_CONTEXT) {
+ ctx->handle = drm_ctxbitmap_next(dev);
+ if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
- ctx.handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_ctxbitmap_next(dev);
}
- if (ctx.handle == (drm_context_t)-1) {
- return (ENOMEM);
+ DRM_DEBUG("%d\n", ctx->handle);
+ /* LINTED */
+ if (ctx->handle == -1) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return -ENOMEM;
}
- if (dev->driver->context_ctor && ctx.handle != DRM_KERNEL_CONTEXT) {
- dev->driver->context_ctor(dev, ctx.handle);
+ ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
+ if (!ctx_entry) {
+ DRM_DEBUG("out of memory\n");
+ return -ENOMEM;
}
- DRM_COPYTO_WITH_RETURN((void *)data, &ctx, sizeof (ctx));
+ INIT_LIST_HEAD(&ctx_entry->head);
+ ctx_entry->handle = ctx->handle;
+ ctx_entry->tag = file;
+
+ mutex_lock(&dev->ctxlist_mutex);
+ list_add(&ctx_entry->head, &dev->ctxlist, (caddr_t)ctx_entry);
+ ++dev->ctx_count;
+ mutex_unlock(&dev->ctxlist_mutex);
- return (0);
+ return 0;
}
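Userspace reaches this handler through the DRM_IOCTL_ADD_CTX ioctl (wrapped by libdrm's drmCreateContext()). A minimal sketch of the raw call, with error handling elided:

    /* Ask the kernel for a fresh context handle (never DRM_KERNEL_CONTEXT). */
    static int create_context(int fd, unsigned int *out_handle)
    {
        struct drm_ctx ctx = { 0 };

        if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) != 0)
            return -1;              /* -ENOMEM: context bitmap exhausted */

        *out_handle = ctx.handle;   /* allocated by drm_ctxbitmap_next() */
        return 0;
    }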
-/*ARGSUSED*/
-int
-drm_modctx(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_modctx(DRM_IOCTL_ARGS)
{
/* This does nothing */
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_getctx(DRM_IOCTL_ARGS)
+/**
+ * Get context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ */
+/* LINTED */
+int drm_getctx(DRM_IOCTL_ARGS)
{
- drm_ctx_t ctx;
-
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ struct drm_ctx *ctx = data;
/* This is 0, because we don't handle any context flags */
- ctx.flags = 0;
+ ctx->flags = 0;
- DRM_COPYTO_WITH_RETURN((void *)data, &ctx, sizeof (ctx));
-
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_switchctx(DRM_IOCTL_ARGS)
+/**
+ * Switch context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch().
+ */
+/* LINTED */
+int drm_switchctx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ struct drm_ctx *ctx = data;
- DRM_DEBUG("drm_switchctx: %d", ctx.handle);
- return (drm_context_switch(dev, dev->last_context, ctx.handle));
+ DRM_DEBUG("%d\n", ctx->handle);
+ return drm_context_switch(dev, dev->last_context, ctx->handle);
}
-/*ARGSUSED*/
-int
-drm_newctx(DRM_IOCTL_ARGS)
+/**
+ * New context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch_complete().
+ */
+/* LINTED */
+int drm_newctx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ struct drm_ctx *ctx = data;
- DRM_DEBUG("drm_newctx: %d", ctx.handle);
- (void) drm_context_switch_complete(dev, ctx.handle);
+ DRM_DEBUG("%d\n", ctx->handle);
+ (void) drm_context_switch_complete(dev, file, ctx->handle);
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_rmctx(DRM_IOCTL_ARGS)
+/**
+ * Remove context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
+ */
+/* LINTED */
+int drm_rmctx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
+ struct drm_ctx *ctx = data;
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ DRM_DEBUG("%d\n", ctx->handle);
+ if (ctx->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev, ctx->handle);
+ drm_ctxbitmap_free(dev, ctx->handle);
+ }
- DRM_DEBUG("drm_rmctx : %d", ctx.handle);
- if (ctx.handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor) {
- DRM_LOCK();
- dev->driver->context_dtor(dev, ctx.handle);
- DRM_UNLOCK();
- }
+ mutex_lock(&dev->ctxlist_mutex);
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
- drm_ctxbitmap_free(dev, ctx.handle);
+ list_for_each_entry_safe(pos, n, struct drm_ctx_list, &dev->ctxlist, head) {
+ if (pos->handle == ctx->handle) {
+ list_del(&pos->head);
+ kfree(pos, sizeof (*pos));
+ --dev->ctx_count;
+ }
+ }
}
+ mutex_unlock(&dev->ctxlist_mutex);
- return (0);
+ return 0;
}
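The matching teardown path lands here via DRM_IOCTL_RM_CTX (libdrm's drmDestroyContext()); for non-kernel contexts the driver's context_dtor runs before the bitmap slot and the ctxlist entry are released. A sketch of the call:

    /* Release a context previously obtained via DRM_IOCTL_ADD_CTX. */
    static int destroy_context(int fd, unsigned int handle)
    {
        struct drm_ctx ctx = { .handle = handle };

        return ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);
    }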
+
+/*@}*/
diff --git a/usr/src/uts/common/io/drm/drm_crtc.c b/usr/src/uts/common/io/drm/drm_crtc.c
new file mode 100644
index 0000000..47976d7
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_crtc.c
@@ -0,0 +1,3944 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drm.h"
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "drm_fourcc.h"
+
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ mutex_lock(&crtc->mutex);
+}
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ mutex_unlock(&crtc->mutex);
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+/* Avoid boilerplate. I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list) \
+ const char *fnname(int val) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(list); i++) { \
+ if (list[i].type == val) \
+ return list[i].name; \
+ } \
+ return "(unknown)"; \
+ }
+
+/*
+ * Global properties
+ */
+static const struct drm_prop_enum_list drm_dpms_enum_list[] =
+{ { DRM_MODE_DPMS_ON, "On" },
+ { DRM_MODE_DPMS_STANDBY, "Standby" },
+ { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+ { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
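For clarity, that invocation expands to a plain linear search over the table above; drm_get_dpms_name() is equivalent to:

    const char *drm_get_dpms_name(int val)
    {
        int i;

        for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) {
            if (drm_dpms_enum_list[i].type == val)
                return drm_dpms_enum_list[i].name;
        }
        return "(unknown)";
    }

so drm_get_dpms_name(DRM_MODE_DPMS_STANDBY) returns "Standby".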
+
+/*
+ * Optional properties
+ */
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+ { DRM_MODE_SCALE_NONE, "None" },
+ { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+ { DRM_MODE_SCALE_CENTER, "Center" },
+ { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+ { DRM_MODE_DITHERING_OFF, "Off" },
+ { DRM_MODE_DITHERING_ON, "On" },
+ { DRM_MODE_DITHERING_AUTO, "Automatic" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+ drm_dvi_i_subconnector_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+ drm_tv_subconnector_enum_list)
+
+static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+ { DRM_MODE_DIRTY_OFF, "Off" },
+ { DRM_MODE_DIRTY_ON, "On" },
+ { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+struct drm_conn_prop_enum_list {
+ int type;
+ const char *name;
+ int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+ { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+ { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+ { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
+ { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+};
+
+static const struct drm_prop_enum_list drm_encoder_enum_list[] =
+{ { DRM_MODE_ENCODER_NONE, "None" },
+ { DRM_MODE_ENCODER_DAC, "DAC" },
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
+ { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+};
+
+const char *drm_get_encoder_name(const struct drm_encoder *encoder)
+{
+ static char buf[32];
+
+ (void) snprintf(buf, 32, "%s-%d",
+ drm_encoder_enum_list[encoder->encoder_type].name,
+ encoder->base.id);
+ return buf;
+}
+
+const char *drm_get_connector_name(const struct drm_connector *connector)
+{
+ static char buf[32];
+
+ (void) snprintf(buf, 32, "%s-%d",
+ drm_connector_enum_list[connector->connector_type].name,
+ connector->connector_type_id);
+ return buf;
+}
+
+const char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+ if (status == connector_status_connected)
+ return "connected";
+ else if (status == connector_status_disconnected)
+ return "disconnected";
+ else
+ return "unknown";
+}
+static char printable_char(int c)
+{
+ return (char)(c);
+}
+
+const char *drm_get_format_name(uint32_t format)
+{
+ static char buf[32];
+
+ (void) snprintf(buf, sizeof(buf),
+ "%c%c%c%c %s-endian (0x%08x)",
+ printable_char(format & 0xff),
+ printable_char((format >> 8) & 0xff),
+ printable_char((format >> 16) & 0xff),
+ printable_char((format >> 24) & 0x7f),
+ format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+ format);
+
+ return buf;
+}
+
+/**
+ * drm_mode_object_get - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
+ *
+ * Create a unique identifier based on @obj in @dev's identifier space. Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
+{
+ int new_id = 0;
+ int ret;
+
+again:
+ if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("Ran out memory getting a mode number\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+
+ if (!ret) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = new_id;
+ obj->type = obj_type;
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ if (ret == -EAGAIN)
+ goto again;
+ return ret;
+}
+
+/**
+ * drm_mode_object_put - free an identifier
+ * @dev: DRM device
+ * @id: ID to free
+ *
+ *
+ * Free @id from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ (void) idr_remove(&dev->mode_config.crtc_idr, object->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * Note that framebuffers cannot be looked up with this function - since those
+ * are reference counted, they need special treatment.
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj = NULL;
+
+ /* Framebuffers are reference counted and need their own lookup
+ * function.*/
+ WARN_ON(type == DRM_MODE_OBJECT_FB);
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != type) || (obj->id != id))
+ obj = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return obj;
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ * @fb: framebuffer to init
+ * @funcs: callbacks for the new framebuffer
+ *
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users, which means that by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ int ret;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ kref_init(&fb->refcount);
+ INIT_LIST_HEAD(&fb->filp_head);
+ fb->dev = dev;
+ fb->funcs = funcs;
+
+ ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+ if (ret)
+ goto out;
+
+ /* Grab the idr reference. */
+ drm_framebuffer_reference(fb);
+
+ dev->mode_config.num_fb++;
+ list_add(&fb->head, &dev->mode_config.fb_list, (caddr_t)fb);
+out:
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return ret;
+}
+
+static void drm_framebuffer_free(struct kref *kref)
+{
+ struct drm_framebuffer *fb =
+ container_of(kref, struct drm_framebuffer, refcount);
+ fb->funcs->destroy(fb);
+}
+
+static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *obj = NULL;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
+ fb = NULL;
+ else
+ fb = obj_to_fb(obj);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return fb;
+}
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again.
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+
+ fb = __drm_framebuffer_lookup(dev, id);
+ if (fb)
+ kref_get(&fb->refcount);
+
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return fb;
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ *
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_put(&fb->refcount, drm_framebuffer_free);
+}
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_get(&fb->refcount);
+}
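A successful drm_framebuffer_lookup() returns with an extra reference held, so each lookup must be paired with an unreference. A hypothetical helper showing the intended pattern (fb_get_size() is illustrative only, not part of this import):

    static int fb_get_size(struct drm_device *dev, uint32_t id,
        uint32_t *width, uint32_t *height)
    {
        struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, id);

        if (fb == NULL)
            return -ENOENT;

        /* fb attributes are invariant over its lifetime; no locking needed */
        *width = fb->width;
        *height = fb->height;

        drm_framebuffer_unreference(fb);
        return 0;
    }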
+
+/* LINTED E_FUNC_ARG_UNUSED */
+static void drm_framebuffer_free_bug(struct kref *kref)
+{
+ BUG();
+}
+
+static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_put(&fb->refcount, drm_framebuffer_free_bug);
+}
+
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ (void) idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ fb->base.id = 0;
+
+ __drm_framebuffer_unreference(fb);
+}
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped and drop idr ref. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ *
+ * Cleanup references to a user-created framebuffer. This function is intended
+ * to be used from the driver's ->destroy callback.
+ *
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config. If they're using @fb,
+ * removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_mode_set set;
+ int ret;
+
+ WARN_ON(!list_empty(&fb->filp_head));
+
+ /*
+ * drm ABI mandates that we remove any deleted framebuffers from active
+ * usage. But since most sane clients only remove framebuffers they no
+ * longer need, try to optimize this away.
+ *
+ * Since we're holding a reference ourselves, observing a refcount of 1
+ * means that we're the last holder and can skip it. Also, the refcount
+ * can never increase from 1 again, so we don't need any barriers or
+ * locks.
+ *
+ * Note that userspace could try to race with us and instate a new
+ * usage _after_ we've cleared all current ones. End result will be an
+ * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+ * in this manner.
+ */
+ if (atomic_read(&fb->refcount.refcount) > 1) {
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb) {
+ /* should turn off the crtc */
+ (void) memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = drm_mode_set_config_internal(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", (void *)crtc);
+ }
+ }
+ list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb)
+ drm_plane_force_disable(plane);
+ }
+ drm_modeset_unlock_all(dev);
+ }
+
+ drm_framebuffer_unreference(fb);
+}
+
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ *
+ * Inits a new object created as base part of a driver crtc object.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs)
+{
+ int ret;
+
+ crtc->dev = dev;
+ crtc->funcs = funcs;
+ crtc->invert_dimensions = false;
+
+ drm_modeset_lock_all(dev);
+ mutex_init(&crtc->mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_lock(&crtc->mutex);
+
+ ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+ if (ret)
+ goto out;
+
+ crtc->base.properties = &crtc->properties;
+
+ list_add_tail(&crtc->head, &dev->mode_config.crtc_list, (caddr_t)crtc);
+ dev->mode_config.num_crtc++;
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
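Drivers are expected to embed struct drm_crtc as the base of a larger private object and initialize it through this function. A hypothetical sketch (my_crtc, my_crtc_funcs and the pipe field are illustrative names, not part of this import):

    struct my_crtc {
        struct drm_crtc base;   /* base object initialized by drm_crtc_init() */
        int pipe;               /* driver-private state */
    };

    static const struct drm_crtc_funcs my_crtc_funcs = {
        /* .set_config, .page_flip, .destroy, ... supplied by the driver */
    };

    static int my_crtc_create(struct drm_device *dev, int pipe)
    {
        struct my_crtc *mc = kzalloc(sizeof (*mc), GFP_KERNEL);

        if (mc == NULL)
            return -ENOMEM;
        mc->pipe = pipe;
        return drm_crtc_init(dev, &mc->base, &my_crtc_funcs);
    }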
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ *
+ * Cleans up @crtc and removes it from the drm modesetting space. It does
+ * NOT free the object; the caller does that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ if (crtc->gamma_store) {
+ kfree(crtc->gamma_store, crtc->gamma_size * sizeof(uint16_t) * 3);
+ crtc->gamma_store = NULL;
+ }
+
+ drm_mode_object_put(dev, &crtc->base);
+ list_del(&crtc->head);
+ dev->mode_config.num_crtc--;
+}
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode is added to
+ * @mode: mode data
+ *
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_add_tail(&mode->head, &connector->probed_modes, (caddr_t)mode);
+}
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_del(&mode->head);
+ drm_mode_destroy(connector->dev, mode);
+}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
+{
+ int ret;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ if (ret)
+ goto out;
+
+ connector->base.properties = &connector->properties;
+ connector->dev = dev;
+ connector->funcs = funcs;
+ connector->connector_type = connector_type;
+ connector->connector_type_id =
+ ++drm_connector_enum_list[connector_type].count; /* TODO */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ INIT_LIST_HEAD(&connector->modes);
+ connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
+
+ list_add_tail(&connector->head, &dev->mode_config.connector_list, (caddr_t)connector);
+ dev->mode_config.num_connector++;
+
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.edid_property,
+ 0);
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.dpms_property, 0);
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
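Connector registration follows the same subclassing pattern; the connector_type_id assigned above pairs with the type name, so the first LVDS connector reports as "LVDS-1" through drm_get_connector_name(). A hypothetical sketch:

    static const struct drm_connector_funcs my_conn_funcs = {
        /* .dpms, .detect, .fill_modes, .destroy supplied by the driver */
    };

    static int my_connector_create(struct drm_device *dev,
        struct drm_connector *connector)
    {
        return drm_connector_init(dev, connector, &my_conn_funcs,
            DRM_MODE_CONNECTOR_LVDS);
    }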
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, struct drm_display_mode, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, struct drm_display_mode, &connector->modes, head)
+ drm_mode_remove(connector, mode);
+
+ drm_mode_object_put(dev, &connector->base);
+ list_del(&connector->head);
+ dev->mode_config.num_connector--;
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+void drm_connector_unplug_all(struct drm_device *dev)
+{
+ /*
+ struct drm_connector *connector;
+ */
+ /* taking the mode config mutex ends up in a clash with sysfs */
+ /*
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ drm_sysfs_connector_remove(connector);
+ */
+}
+
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type)
+{
+ int ret;
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ if (ret)
+ goto out;
+
+ encoder->dev = dev;
+ encoder->encoder_type = encoder_type;
+ encoder->funcs = funcs;
+
+ list_add_tail(&encoder->head, &dev->mode_config.encoder_list, (caddr_t)encoder);
+ dev->mode_config.num_encoder++;
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ drm_modeset_lock_all(dev);
+ drm_mode_object_put(dev, &encoder->base);
+ list_del(&encoder->head);
+ dev->mode_config.num_encoder--;
+ drm_modeset_unlock_all(dev);
+}
+
+/**
+ * drm_plane_init - Initialise a new plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @priv: plane is private (hidden from userspace)?
+ *
+ * Inits a new object created as base part of a driver plane object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv)
+{
+ int ret;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ if (ret)
+ goto out;
+
+ plane->base.properties = &plane->properties;
+ plane->dev = dev;
+ plane->funcs = funcs;
+ plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+ GFP_KERNEL);
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_put(dev, &plane->base);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ (void) memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = (uint32_t) possible_crtcs;
+
+ /* private planes are not exposed to userspace, but depending on
+ * display hardware, might be convenient to allow sharing programming
+ * for the scanout engine with the crtc implementation.
+ */
+ if (!priv) {
+ list_add_tail(&plane->head, &dev->mode_config.plane_list, (caddr_t)plane);
+ dev->mode_config.num_plane++;
+ } else {
+ INIT_LIST_HEAD(&plane->head);
+ }
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
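Because drm_plane_init() copies the format array, callers may pass a static table. A hypothetical plane exposing two packed-RGB formats on the first CRTC only:

    static const uint32_t my_plane_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_RGB565,
    };

    static int my_plane_create(struct drm_device *dev, struct drm_plane *plane,
        const struct drm_plane_funcs *funcs)
    {
        /* possible_crtcs is a bitmask; bit 0 selects the first CRTC */
        return drm_plane_init(dev, plane, 0x1, funcs,
            my_plane_formats, ARRAY_SIZE(my_plane_formats), false);
    }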
+
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ drm_modeset_lock_all(dev);
+ kfree(plane->format_types, sizeof(uint32_t) * plane->format_count);
+ drm_mode_object_put(dev, &plane->base);
+ /* if not added to a list, it must be a private plane */
+ if (!list_empty(&plane->head)) {
+ list_del(&plane->head);
+ dev->mode_config.num_plane--;
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+ int ret;
+
+ if (!plane->fb)
+ return;
+
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ __drm_framebuffer_unreference(plane->fb);
+ plane->fb = NULL;
+ plane->crtc = NULL;
+}
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+ if (!nmode)
+ return NULL;
+
+ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+ kfree(nmode, sizeof(struct drm_display_mode));
+ return NULL;
+ }
+
+ return nmode;
+}
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ if (!mode)
+ return;
+
+ drm_mode_object_put(dev, &mode->base);
+
+ kfree(mode, sizeof(struct drm_display_mode));
+}
+
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+ struct drm_property *edid;
+ struct drm_property *dpms;
+
+ /*
+ * Standard properties (apply to all connectors)
+ */
+ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "EDID", 0);
+ dev->mode_config.edid_property = edid;
+
+ dpms = drm_property_create_enum(dev, 0,
+ "DPMS", drm_dpms_enum_list,
+ ARRAY_SIZE(drm_dpms_enum_list));
+ dev->mode_config.dpms_property = dpms;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+ struct drm_property *dvi_i_selector;
+ struct drm_property *dvi_i_subconnector;
+
+ if (dev->mode_config.dvi_i_select_subconnector_property)
+ return 0;
+
+ dvi_i_selector =
+ drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_dvi_i_select_enum_list,
+ ARRAY_SIZE(drm_dvi_i_select_enum_list));
+ dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+ dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_dvi_i_subconnector_enum_list,
+ ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+ dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device. Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+ char *modes[])
+{
+ struct drm_property *tv_selector;
+ struct drm_property *tv_subconnector;
+ int i;
+
+ if (dev->mode_config.tv_select_subconnector_property)
+ return 0;
+
+ /*
+ * Basic connector properties
+ */
+ tv_selector = drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_tv_select_enum_list,
+ ARRAY_SIZE(drm_tv_select_enum_list));
+ dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+ tv_subconnector =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_tv_subconnector_enum_list,
+ ARRAY_SIZE(drm_tv_subconnector_enum_list));
+ dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+ /*
+ * Other, TV specific properties: margins & TV modes.
+ */
+ dev->mode_config.tv_left_margin_property =
+ drm_property_create_range(dev, 0, "left margin", 0, 100);
+
+ dev->mode_config.tv_right_margin_property =
+ drm_property_create_range(dev, 0, "right margin", 0, 100);
+
+ dev->mode_config.tv_top_margin_property =
+ drm_property_create_range(dev, 0, "top margin", 0, 100);
+
+ dev->mode_config.tv_bottom_margin_property =
+ drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+
+ dev->mode_config.tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ for (i = 0; i < num_modes; i++)
+ (void) drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+ i, modes[i]);
+
+ dev->mode_config.tv_brightness_property =
+ drm_property_create_range(dev, 0, "brightness", 0, 100);
+
+ dev->mode_config.tv_contrast_property =
+ drm_property_create_range(dev, 0, "contrast", 0, 100);
+
+ dev->mode_config.tv_flicker_reduction_property =
+ drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+
+ dev->mode_config.tv_overscan_property =
+ drm_property_create_range(dev, 0, "overscan", 0, 100);
+
+ dev->mode_config.tv_saturation_property =
+ drm_property_create_range(dev, 0, "saturation", 0, 100);
+
+ dev->mode_config.tv_hue_property =
+ drm_property_create_range(dev, 0, "hue", 0, 100);
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+ struct drm_property *scaling_mode;
+
+ if (dev->mode_config.scaling_mode_property)
+ return 0;
+
+ scaling_mode =
+ drm_property_create_enum(dev, 0, "scaling mode",
+ drm_scaling_mode_enum_list,
+ ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+ dev->mode_config.scaling_mode_property = scaling_mode;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+ struct drm_property *dithering_mode;
+
+ if (dev->mode_config.dithering_mode_property)
+ return 0;
+
+ dithering_mode =
+ drm_property_create_enum(dev, 0, "dithering",
+ drm_dithering_mode_enum_list,
+ ARRAY_SIZE(drm_dithering_mode_enum_list));
+ dev->mode_config.dithering_mode_property = dithering_mode;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+ struct drm_property *dirty_info;
+
+ if (dev->mode_config.dirty_info_property)
+ return 0;
+
+ dirty_info =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "dirty",
+ drm_dirty_info_enum_list,
+ ARRAY_SIZE(drm_dirty_info_enum_list));
+ dev->mode_config.dirty_info_property = dirty_info;
+
+ return 0;
+}
+
+
+static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+ uint32_t total_objects = 0;
+
+ total_objects += dev->mode_config.num_crtc;
+ total_objects += dev->mode_config.num_connector;
+ total_objects += dev->mode_config.num_encoder;
+
+ group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+ if (!group->id_list)
+ return -ENOMEM;
+
+ group->num_crtcs = 0;
+ group->num_connectors = 0;
+ group->num_encoders = 0;
+ return 0;
+}
+
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+ struct drm_mode_group *group)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ if ((ret = drm_mode_group_init(dev, group)))
+ return ret;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ group->id_list[group->num_crtcs++] = crtc->base.id;
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders++] =
+ encoder->base.id;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders +
+ group->num_connectors++] = connector->base.id;
+
+ return 0;
+}
+
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+ const struct drm_display_mode *in)
+{
+ if (in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
+ in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
+ in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
+ in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
+ in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX)
+ DRM_ERROR("timing values too large for mode info\n");
+
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ (void) strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to return to the user
+ * @in: drm_mode_modeinfo to use
+ *
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ */
+static int drm_crtc_convert_umode(struct drm_display_mode *out,
+ const struct drm_mode_modeinfo *in)
+{
+ if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+ return -ERANGE;
+
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ (void) strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+
+ return 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getresources(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0, i;
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+ struct drm_mode_group *mode_group;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&file->fbs_lock);
+
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file->fbs)
+ fb_count++;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, struct drm_framebuffer, &file->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file->fbs_lock);
+
+ drm_modeset_lock_all(dev);
+
+ mode_group = &file->master->minor->mode_group;
+ if (file->master->minor->type == DRM_MINOR_CONTROL) {
+
+ list_for_each(lh, &dev->mode_config.crtc_list)
+ crtc_count++;
+
+ list_for_each(lh, &dev->mode_config.connector_list)
+ connector_count++;
+
+ list_for_each(lh, &dev->mode_config.encoder_list)
+ encoder_count++;
+ } else {
+
+ crtc_count = mode_group->num_crtcs;
+ connector_count = mode_group->num_connectors;
+ encoder_count = mode_group->num_encoders;
+ }
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+ if (file->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list,
+ head) {
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ for (i = 0; i < mode_group->num_crtcs; i++) {
+ if (put_user(mode_group->id_list[i],
+ crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+ if (file->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(encoder, struct drm_encoder,
+ &dev->mode_config.encoder_list,
+ head) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ drm_get_encoder_name(encoder));
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+ if (put_user(mode_group->id_list[i],
+ encoder_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+
+ }
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+ if (file->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(connector, struct drm_connector,
+ &dev->mode_config.connector_list,
+ head) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ int start = mode_group->num_crtcs +
+ mode_group->num_encoders;
+ for (i = start; i < start + mode_group->num_connectors; i++) {
+ if (put_user(mode_group->id_list[i],
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ card_res->count_connectors = connector_count;
+
+ DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
+ card_res->count_connectors, card_res->count_encoders);
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
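Like drm_mode_getconnector() and drm_mode_getplane() below, this ioctl expects a two-pass caller: probe with zeroed counts, allocate, then call again to fill. A userspace sketch for the CRTC list only (headers and casts are assumptions; error paths abbreviated):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    static uint32_t *get_crtc_ids(int fd, uint32_t *count)
    {
        struct drm_mode_card_res res = { 0 };
        uint32_t *ids;

        /* pass 1: all counts zero, kernel only reports object counts */
        if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res) != 0)
            return NULL;

        ids = calloc(res.count_crtcs, sizeof (*ids));
        if (ids == NULL)
            return NULL;

        /* pass 2: supply a buffer of at least count_crtcs entries */
        res.crtc_id_ptr = (uint64_t)(uintptr_t)ids;
        if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res) != 0) {
            free(ids);
            return NULL;
        }
        *count = res.count_crtcs;
        return ids;
    }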
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getcrtc(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_crtc *crtc_resp = data;
+ struct drm_crtc *crtc;
+ struct drm_mode_object *obj;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ crtc_resp->x = crtc->x;
+ crtc_resp->y = crtc->y;
+ crtc_resp->gamma_size = crtc->gamma_size;
+ if (crtc->fb)
+ crtc_resp->fb_id = crtc->fb->base.id;
+ else
+ crtc_resp->fb_id = 0;
+
+ if (crtc->enabled) {
+
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+ crtc_resp->mode_valid = 1;
+
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ *
+ * Construct a connector configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getconnector(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_get_connector *out_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ int mode_count = 0;
+ int props_count = 0;
+ int encoders_count = 0;
+ int ret = 0;
+ int copied = 0;
+ int i;
+ struct drm_mode_modeinfo u_mode;
+ struct drm_mode_modeinfo __user *mode_ptr;
+ uint32_t __user *prop_ptr;
+ uint64_t __user *prop_values;
+ uint32_t __user *encoder_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ (void) memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, out_resp->connector_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ props_count = connector->properties.count;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ encoders_count++;
+ }
+ }
+
+ if (out_resp->count_modes == 0) {
+ connector->funcs->fill_modes(connector,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ }
+
+ /* delayed so we get modes regardless of pre-fill_modes state */
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head)
+ mode_count++;
+
+ out_resp->connector_id = connector->base.id;
+ out_resp->connector_type = connector->connector_type;
+ out_resp->connector_type_id = connector->connector_type_id;
+ out_resp->mm_width = connector->display_info.width_mm;
+ out_resp->mm_height = connector->display_info.height_mm;
+ out_resp->subpixel = connector->display_info.subpixel_order;
+ out_resp->connection = connector->status;
+ if (connector->encoder)
+ out_resp->encoder_id = connector->encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if ((out_resp->count_modes >= mode_count) && mode_count) {
+ copied = 0;
+ mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ drm_crtc_convert_to_umode(&u_mode, mode);
+ if (DRM_COPY_TO_USER(mode_ptr + copied,
+ &u_mode, sizeof(u_mode))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_modes = mode_count;
+
+ if ((out_resp->count_props >= props_count) && props_count) {
+ copied = 0;
+ prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+ prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+ for (i = 0; i < connector->properties.count; i++) {
+ if (put_user(connector->properties.ids[i],
+ prop_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (put_user(connector->properties.values[i],
+ prop_values + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_props = props_count;
+
+ if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ copied = 0;
+ encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ if (put_user(connector->encoder_ids[i],
+ encoder_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_encoders = encoders_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
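+
+/*
+ * Example (an illustrative sketch, not part of the driver): userspace
+ * drives the two-pass protocol above by issuing the ioctl once with all
+ * counts zeroed to learn the required sizes, allocating the arrays, and
+ * then issuing it again to have them filled in. Only
+ * DRM_IOCTL_MODE_GETCONNECTOR and the struct drm_mode_get_connector
+ * layout come from the DRM headers; fd and connector_id are assumed.
+ *
+ * struct drm_mode_get_connector c;
+ * (void) memset(&c, 0, sizeof (c));
+ * c.connector_id = connector_id;
+ * if (ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &c) != 0)
+ * return (-1);
+ * c.modes_ptr = (uintptr_t)calloc(c.count_modes,
+ * sizeof (struct drm_mode_modeinfo));
+ * c.props_ptr = (uintptr_t)calloc(c.count_props, sizeof (uint32_t));
+ * c.prop_values_ptr = (uintptr_t)calloc(c.count_props, sizeof (uint64_t));
+ * c.encoders_ptr = (uintptr_t)calloc(c.count_encoders, sizeof (uint32_t));
+ * if (ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &c) != 0)
+ * return (-1);
+ */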
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getencoder(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_get_encoder *enc_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ encoder = obj_to_encoder(obj);
+
+ if (encoder->crtc)
+ enc_resp->crtc_id = encoder->crtc->base.id;
+ else
+ enc_resp->crtc_id = 0;
+ enc_resp->encoder_type = encoder->encoder_type;
+ enc_resp->encoder_id = encoder->base.id;
+ enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane_res - enumerate plane resources
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return a count of planes and a set of plane IDs.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getplane_res(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+ int copied = 0, ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ config = &dev->mode_config;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (config->num_plane &&
+ (plane_resp->count_planes >= config->num_plane)) {
+ plane_ptr = (uint32_t *)(unsigned long)plane_resp->plane_id_ptr;
+
+ list_for_each_entry(plane, struct drm_plane, &config->plane_list, head) {
+ if (put_user(plane->base.id, plane_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ plane_resp->count_planes = config->num_plane;
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getplane(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ uint32_t *format_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, plane_resp->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+ if (DRM_COPY_TO_USER(format_ptr,
+ plane->format_types,
+ sizeof(uint32_t) * plane->format_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane info, including placement, fb, scaling, and other factors,
+ * or pass a zero fb_id to disable the plane.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_setplane(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ return -ENOENT;
+ }
+ plane = obj_to_plane(obj);
+
+ /* No fb means shut it down */
+ if (!plane_req->fb_id) {
+ drm_modeset_lock_all(dev);
+ old_fb = plane->fb;
+ plane->funcs->disable_plane(plane);
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ drm_modeset_unlock_all(dev);
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (plane_req->src_w > fb_width ||
+ plane_req->src_x > fb_width - plane_req->src_w ||
+ plane_req->src_h > fb_height ||
+ plane_req->src_y > fb_height - plane_req->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ plane_req->src_w >> 16,
+ ((plane_req->src_w & 0xffff) * 15625) >> 10,
+ plane_req->src_h >> 16,
+ ((plane_req->src_h & 0xffff) * 15625) >> 10,
+ plane_req->src_x >> 16,
+ ((plane_req->src_x & 0xffff) * 15625) >> 10,
+ plane_req->src_y >> 16,
+ ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ drm_modeset_lock_all(dev);
+ ret = plane->funcs->update_plane(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+ if (!ret) {
+ old_fb = plane->fb;
+ plane->crtc = crtc;
+ plane->fb = fb;
+ fb = NULL;
+ }
+ drm_modeset_unlock_all(dev);
+
+out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ return ret;
+}
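+
+/*
+ * Note on the source rectangle checks above: src_{x,y,w,h} are 16.16
+ * fixed-point values, so userspace scales pixel coordinates by 1 << 16
+ * before issuing the ioctl. A minimal sketch (fd and the object ids are
+ * assumed to be already looked up; names are hypothetical):
+ *
+ * struct drm_mode_set_plane p;
+ * (void) memset(&p, 0, sizeof (p));
+ * p.plane_id = plane_id;
+ * p.crtc_id = crtc_id;
+ * p.fb_id = fb_id;
+ * p.crtc_x = 0;
+ * p.crtc_y = 0;
+ * p.crtc_w = 640;
+ * p.crtc_h = 480;
+ * p.src_x = 0;
+ * p.src_y = 0;
+ * p.src_w = 640 << 16;
+ * p.src_h = 480 << 16;
+ * (void) ioctl(fd, DRM_IOCTL_MODE_SETPLANE, &p);
+ *
+ * CRTC coordinates are whole pixels; only the src_* fields are shifted.
+ */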
+
+/**
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
+ *
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is the correct refcounting dance.
+ */
+int drm_mode_set_config_internal(struct drm_mode_set *set)
+{
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *tmp;
+ int ret;
+
+ /*
+ * NOTE: ->set_config can also disable other crtcs (if we steal all
+ * connectors from it), hence we need to refcount the fbs across all
+ * crtcs. Atomic modeset will have saner semantics ...
+ */
+ list_for_each_entry(tmp, struct drm_crtc, &crtc->dev->mode_config.crtc_list, head)
+ tmp->old_fb = tmp->fb;
+
+ fb = set->fb;
+ ret = crtc->funcs->set_config(set);
+ if (ret == 0) {
+ /* crtc->fb must be updated by ->set_config; enforce that here. */
+ if (fb != crtc->fb)
+ DRM_ERROR("fb 0x%lx != crtc->fb 0x%lx", (uintptr_t)fb, (uintptr_t)crtc->fb);
+ }
+
+ list_for_each_entry(tmp, struct drm_crtc, &crtc->dev->mode_config.crtc_list, head) {
+ if (tmp->fb)
+ drm_framebuffer_reference(tmp->fb);
+ if (tmp->old_fb)
+ drm_framebuffer_unreference(tmp->old_fb);
+ }
+
+ return ret;
+}
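+
+/*
+ * Example (a sketch under stated assumptions): an in-kernel caller such
+ * as an fbdev emulation layer restores a CRTC by filling in a
+ * drm_mode_set and going through this helper, so the framebuffer
+ * refcounting above stays balanced. The crtc, fb, mode and connector
+ * below are assumed to have been looked up by the caller.
+ *
+ * struct drm_mode_set set;
+ * (void) memset(&set, 0, sizeof (set));
+ * set.crtc = crtc;
+ * set.fb = fb;
+ * set.mode = mode;
+ * set.connectors = &connector;
+ * set.num_connectors = 1;
+ * ret = drm_mode_set_config_internal(&set);
+ */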
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * Build a new CRTC configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_setcrtc(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_mode_crtc *crtc_req = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct drm_mode_set set;
+ uint32_t __user *set_connectors_ptr;
+ int ret;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* For some reason crtc x/y offsets are signed internally. */
+ if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ return -ERANGE;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ if (crtc_req->mode_valid) {
+ int hdisplay, vdisplay;
+ /* If we have a mode we need a framebuffer. */
+ /* If we pass -1, set the mode with the currently bound fb */
+ /* LINTED */
+ if (crtc_req->fb_id == -1) {
+ if (!crtc->fb) {
+ DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ fb = crtc->fb;
+ /* Make refcounting symmetric with the lookup path. */
+ drm_framebuffer_reference(fb);
+ } else {
+ fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown FB ID%d\n",
+ crtc_req->fb_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+
+ hdisplay = mode->hdisplay;
+ vdisplay = mode->vdisplay;
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc_req->x > fb->width - hdisplay ||
+ crtc_req->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height,
+ hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ ret = -ENOSPC;
+ goto out;
+ }
+ }
+
+ if (crtc_req->count_connectors == 0 && mode) {
+ DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+ DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+ crtc_req->count_connectors);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0) {
+ u32 out_id;
+
+ /* Avoid unbounded kernel memory allocation */
+ if (crtc_req->count_connectors > config->num_connector) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ connector_set = kmalloc(crtc_req->count_connectors *
+ sizeof(struct drm_connector *),
+ GFP_KERNEL);
+ if (!connector_set) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < crtc_req->count_connectors; i++) {
+ set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+ if (get_user(out_id, &set_connectors_ptr[i])) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, out_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ DRM_DEBUG_KMS("Connector id %d unknown\n",
+ out_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+
+ connector_set[i] = connector;
+ }
+ }
+
+ set.crtc = crtc;
+ set.x = crtc_req->x;
+ set.y = crtc_req->y;
+ set.mode = mode;
+ set.connectors = connector_set;
+ set.num_connectors = crtc_req->count_connectors;
+ set.fb = fb;
+ ret = drm_mode_set_config_internal(&set);
+
+out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ kfree(connector_set, crtc_req->count_connectors * sizeof(struct drm_connector *));
+ drm_mode_destroy(dev, mode);
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+static int drm_mode_cursor_common(struct drm_device *dev,
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+ return -EINVAL;
+
+ obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+ return -EINVAL;
+ }
+ crtc = obj_to_crtc(obj);
+
+ mutex_lock(&crtc->mutex);
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
+ ret = -ENXIO;
+ goto out;
+ }
+ /* Turns off the cursor if handle is 0 */
+ if (crtc->funcs->cursor_set2)
+ ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+ req->width, req->height, req->hot_x, req->hot_y);
+ else
+ ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+ req->width, req->height);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ if (crtc->funcs->cursor_move) {
+ ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+ } else {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&crtc->mutex);
+
+ return ret;
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_cursor_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_cursor *req = data;
+ struct drm_mode_cursor2 new_req;
+
+ (void) memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+ new_req.hot_x = new_req.hot_y = 0;
+
+ return drm_mode_cursor_common(dev, &new_req, file);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_cursor2_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_cursor2 *req = data;
+ return drm_mode_cursor_common(dev, req, file);
+}
+
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_C8;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
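+
+/*
+ * A few sample mappings of the switch above, for the common legacy
+ * depth/bpp pairs:
+ *
+ * drm_mode_legacy_fb_format(32, 24) == DRM_FORMAT_XRGB8888
+ * drm_mode_legacy_fb_format(32, 32) == DRM_FORMAT_ARGB8888
+ * drm_mode_legacy_fb_format(16, 16) == DRM_FORMAT_RGB565
+ * drm_mode_legacy_fb_format(16, 15) == DRM_FORMAT_XRGB1555
+ */
+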
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * Add a new FB to the specified CRTC, given a user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_addfb(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ (void) memset(&r, 0, sizeof(struct drm_mode_fb_cmd2));
+
+ /* Use new struct with format internally */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if ((config->min_width > r.width) || (r.width > config->max_width))
+ return -EINVAL;
+
+ if ((config->min_height > r.height) || (r.height > config->max_height))
+ return -EINVAL;
+
+ fb = dev->mode_config.funcs->fb_create(dev, file, &r);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&file->fbs_lock);
+ or->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file->fbs, (caddr_t)fb);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file->fbs_lock);
+
+ return ret;
+}
+
+static int format_check(const struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+ int ret, hsub, vsub, num_planes, i;
+
+ ret = format_check(r);
+ if (ret) {
+ DRM_DEBUG_KMS("bad framebuffer format %s\n",
+ drm_get_format_name(r->pixel_format));
+ return ret;
+ }
+
+ hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ return -EINVAL;
+ }
+
+ if (r->height == 0 || r->height % vsub) {
+ DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+ if (!r->handles[i]) {
+ DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
+ DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
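+
+/*
+ * Worked example of the per-plane checks above, for a 1920x1080 NV12
+ * framebuffer (2 planes, 2x2 chroma subsampling): plane 0 (Y) has
+ * width 1920 and cpp 1, so pitches[0] must be at least 1920; plane 1
+ * (interleaved CbCr) has width 1920 / 2 = 960 and cpp 2, so pitches[1]
+ * must also be at least 1920. The width and height themselves must be
+ * multiples of hsub and vsub (both 2 here).
+ */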
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @arg: arg from ioctl
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_addfb2(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return -EINVAL;
+ }
+
+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
+ return -EINVAL;
+ }
+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
+ return -EINVAL;
+ }
+
+ ret = framebuffer_check(r);
+ if (ret)
+ return ret;
+
+ fb = dev->mode_config.funcs->fb_create(dev, file, r);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&file->fbs_lock);
+ r->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file->fbs, (caddr_t)fb);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file->fbs_lock);
+
+ return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @arg: arg from ioctl
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED */
+int drm_mode_rmfb(DRM_IOCTL_ARGS)
+{
+ struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fbl = NULL;
+ uint32_t *id = data;
+ int found = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&file->fbs_lock);
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ goto fail_lookup;
+
+ list_for_each_entry(fbl, struct drm_framebuffer, &file->fbs, filp_head)
+ if (fb == fbl)
+ found = 1;
+
+ if (!found)
+ goto fail_lookup;
+
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file->fbs_lock);
+
+ drm_framebuffer_remove(fb);
+ return 0;
+
+fail_lookup:
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file->fbs_lock);
+
+ return -EINVAL;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @arg: arg from ioctl
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED */
+int drm_mode_getfb(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_fb_cmd *r = data;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->depth = fb->depth;
+ r->bpp = fb->bits_per_pixel;
+ r->pitch = fb->pitches[0];
+ if (fb->funcs->create_handle)
+ ret = fb->funcs->create_handle(fb, file, &r->handle);
+ else
+ ret = -ENODEV;
+
+ drm_framebuffer_unreference(fb);
+
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_dirtyfb_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_clip_rect __user *clips_ptr;
+ struct drm_clip_rect *clips = NULL;
+ struct drm_mode_fb_dirty_cmd *r = data;
+ struct drm_framebuffer *fb;
+ unsigned flags;
+ int num_clips;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
+
+ num_clips = r->num_clips;
+ clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+ if (!num_clips != !clips_ptr) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+ /* If userspace annotates copy, clips must come in pairs */
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ if (num_clips && clips_ptr) {
+ if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+ clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ if (!clips) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = copy_from_user(clips, clips_ptr,
+ num_clips * sizeof(*clips));
+ if (ret) {
+ ret = -EFAULT;
+ goto out_err2;
+ }
+ }
+
+ if (fb->funcs->dirty) {
+ drm_modeset_lock_all(dev);
+ ret = fb->funcs->dirty(fb, file, flags, r->color,
+ clips, num_clips);
+ drm_modeset_unlock_all(dev);
+ } else {
+ ret = -ENOSYS;
+ }
+
+out_err2:
+ if (clips)
+ kfree(clips, num_clips * sizeof(*clips));
+out_err1:
+ drm_framebuffer_unreference(fb);
+ return ret;
+}
+
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: DRM file private whose framebuffers are destroyed
+ *
+ * Destroy all the FBs associated with @priv; called at file release time
+ * rather than via ioctl.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_framebuffer *fb, *tfb;
+
+ mutex_lock(&priv->fbs_lock);
+ list_for_each_entry_safe(fb, tfb, struct drm_framebuffer, &priv->fbs, filp_head) {
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ list_del_init(&fb->filp_head);
+
+ /* This will also drop the fpriv->fbs reference. */
+ drm_framebuffer_remove(fb);
+ }
+ mutex_unlock(&priv->fbs_lock);
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values)
+{
+ struct drm_property *property = NULL;
+ int ret;
+
+ property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+ if (!property)
+ return NULL;
+
+ if (num_values) {
+ property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+ if (!property->values)
+ goto fail;
+ }
+
+ ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ if (ret)
+ goto fail;
+
+ property->flags = flags;
+ property->num_values = num_values;
+ INIT_LIST_HEAD(&property->enum_blob_list);
+
+ if (name) {
+ (void) strncpy(property->name, name, DRM_PROP_NAME_LEN);
+ property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ }
+
+ list_add_tail(&property->head, &dev->mode_config.property_list, (caddr_t)property);
+ return property;
+fail:
+ kfree(property->values, sizeof(uint64_t)*num_values);
+ kfree(property, sizeof(struct drm_property));
+ return NULL;
+}
+
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_ENUM;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_BITMASK;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max)
+{
+ struct drm_property *property;
+
+ flags |= DRM_MODE_PROP_RANGE;
+
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+
+ property->values[0] = min;
+ property->values[1] = max;
+
+ return property;
+}
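+
+/*
+ * Example (an illustrative sketch): a driver typically creates its
+ * properties once at init time and then attaches them to mode objects
+ * with an initial value. The property name and range below are made up
+ * for illustration.
+ *
+ * struct drm_property *prop;
+ *
+ * prop = drm_property_create_range(dev, 0, "scaling margin", 0, 100);
+ * if (prop != NULL)
+ * drm_object_attach_property(&crtc->base, prop, 0);
+ */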
+
+int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name)
+{
+ struct drm_property_enum *prop_enum;
+
+ if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+ if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
+ return -EINVAL;
+
+ if (!list_empty(&property->enum_blob_list)) {
+ list_for_each_entry(prop_enum, struct drm_property_enum, &property->enum_blob_list, head) {
+ if (prop_enum->value == value) {
+ (void) strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ return 0;
+ }
+ }
+ }
+
+ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+ if (!prop_enum)
+ return -ENOMEM;
+
+ (void) strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ prop_enum->value = value;
+
+ property->values[index] = value;
+ list_add_tail(&prop_enum->head, &property->enum_blob_list, (caddr_t)prop_enum);
+ return 0;
+}
+
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+ struct drm_property_enum *prop_enum, *pt;
+
+ list_for_each_entry_safe(prop_enum, pt, struct drm_property_enum, &property->enum_blob_list, head) {
+ list_del(&prop_enum->head);
+ kfree(prop_enum, sizeof(struct drm_property_enum));
+ }
+
+ if (property->num_values)
+ kfree(property->values, sizeof(uint64_t) * property->num_values);
+ drm_mode_object_put(dev, &property->base);
+ list_del(&property->head);
+ kfree(property, sizeof(struct drm_property));
+}
+
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+{
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ DRM_ERROR("Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
+ }
+
+ obj->properties->ids[count] = property->base.id;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
+}
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+{
+ int i;
+
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ obj->properties->values[i] = val;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ int i;
+
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ *val = obj->properties->values[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* LINTED */
+int drm_mode_getproperty_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_property *out_resp = data;
+ struct drm_property *property;
+ int enum_count = 0;
+ int blob_count = 0;
+ int value_count = 0;
+ int ret = 0, i;
+ int copied;
+ struct drm_property_enum *prop_enum;
+ struct drm_mode_property_enum __user *enum_ptr;
+ struct drm_property_blob *prop_blob;
+ uint32_t *blob_id_ptr;
+ uint64_t *values_ptr;
+ uint32_t *blob_length_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+ if (!obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ property = obj_to_property(obj);
+
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ list_for_each_entry(prop_enum, struct drm_property_enum, &property->enum_blob_list, head)
+ enum_count++;
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ list_for_each_entry(prop_blob, struct drm_property_blob, &property->enum_blob_list, head)
+ blob_count++;
+ }
+
+ value_count = property->num_values;
+
+ (void) strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+ out_resp->flags = property->flags;
+
+ if ((out_resp->count_values >= value_count) && value_count) {
+ values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+ for (i = 0; i < value_count; i++) {
+ if (DRM_COPY_TO_USER(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ out_resp->count_values = value_count;
+
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ copied = 0;
+ enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+ list_for_each_entry(prop_enum, struct drm_property_enum, &property->enum_blob_list, head) {
+
+ if (DRM_COPY_TO_USER(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (DRM_COPY_TO_USER(&enum_ptr[copied].name,
+ &prop_enum->name, DRM_PROP_NAME_LEN)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = enum_count;
+ }
+
+ if (property->flags & DRM_MODE_PROP_BLOB) {
+ if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+ copied = 0;
+ blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+
+ list_for_each_entry(prop_blob, struct drm_property_blob, &property->enum_blob_list, head) {
+ if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (put_user(prop_blob->length, blob_length_ptr + copied)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = blob_count;
+ }
+done:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+ void *data)
+{
+ struct drm_property_blob *blob;
+ int ret;
+
+ if (!length || !data)
+ return NULL;
+
+ blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ if (!blob)
+ return NULL;
+
+ ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+ if (ret) {
+ kfree(blob, sizeof(struct drm_property_blob)+length);
+ return NULL;
+ }
+
+ blob->length = length;
+
+ (void) memcpy(blob->data, data, length);
+
+ list_add_tail(&blob->head, &dev->mode_config.property_blob_list, (caddr_t)blob);
+ return blob;
+}
+
+static void drm_property_destroy_blob(struct drm_device *dev,
+ struct drm_property_blob *blob)
+{
+ drm_mode_object_put(dev, &blob->base);
+ list_del(&blob->head);
+ kfree(blob, sizeof(struct drm_property_blob) + blob->length);
+}
+
+/* LINTED */
+int drm_mode_getblob_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ int ret = 0;
+ void *blob_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+ if (!obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ blob = obj_to_blob(obj);
+
+ if (out_resp->length == blob->length) {
+ blob_ptr = (void *)(unsigned long)out_resp->data;
+ if (DRM_COPY_TO_USER(blob_ptr, blob->data, blob->length)){
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ out_resp->length = blob->length;
+
+done:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+
+ if (connector->edid_blob_ptr)
+ drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+ /* Delete edid, when there is none. */
+ if (!edid) {
+ connector->edid_blob_ptr = NULL;
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
+ return ret;
+ }
+
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
+ if (!connector->edid_blob_ptr)
+ return -EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.edid_property,
+ connector->edid_blob_ptr->base.id);
+
+ return ret;
+}
+
+static bool drm_property_change_is_valid(struct drm_property *property,
+ uint64_t value)
+{
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else if (property->flags & DRM_MODE_PROP_BITMASK) {
+ int i;
+ uint64_t valid_mask = 0;
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ /* Only the driver knows */
+ return true;
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+ }
+}
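+
+/*
+ * Note that for a bitmask property the values[] array holds bit
+ * positions, not masks. For example, a property created with values
+ * {0, 1, 4} yields valid_mask = 0x13 above, so value 0x12 (bits 1 and 4)
+ * passes while value 0x04 (bit 2) is rejected.
+ */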
+
+int drm_mode_connector_property_set_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev_id, dev, &obj_set_prop, file, ioctl_mode, credp);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int)value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ ret = drm_object_property_set_value(&connector->base, property, value);
+ return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->set_property)
+ ret = crtc->funcs->set_property(crtc, property, value);
+ if (!ret)
+ ret = drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+ if (!ret)
+ ret = drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_obj_get_properties_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
+ int i;
+ int copied = 0;
+ int props_count = 0;
+ uint32_t __user *props_ptr;
+ uint64_t __user *prop_values_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ props_count = obj->properties->count;
+
+ /* This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it. */
+ if ((arg->count_props >= props_count) && props_count) {
+ copied = 0;
+ props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ prop_values_ptr = (uint64_t __user *)(unsigned long)
+ (arg->prop_values_ptr);
+ for (i = 0; i < props_count; i++) {
+ if (put_user(obj->properties->ids[i],
+ props_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(obj->properties->values[i],
+ prop_values_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ arg->count_props = props_count;
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_obj_set_property_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_mode_object *prop_obj;
+ struct drm_property *property;
+ int ret = -EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj)
+ goto out;
+ if (!arg_obj->properties)
+ goto out;
+
+ for (i = 0; i < arg_obj->properties->count; i++)
+ if (arg_obj->properties->ids[i] == arg->prop_id)
+ break;
+
+ if (i == arg_obj->properties->count)
+ goto out;
+
+ prop_obj = drm_mode_object_find(dev, arg->prop_id,
+ DRM_MODE_OBJECT_PROPERTY);
+ if (!prop_obj)
+ goto out;
+ property = obj_to_property(prop_obj);
+
+ if (!drm_property_change_is_valid(property, arg->value))
+ goto out;
+
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ }
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0) {
+ connector->encoder_ids[i] = encoder->base.id;
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == encoder->base.id) {
+ connector->encoder_ids[i] = 0;
+ if (connector->encoder == encoder)
+ connector->encoder = NULL;
+ break;
+ }
+ }
+}
+
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size)
+{
+ crtc->gamma_size = gamma_size;
+
+ crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+ if (!crtc->gamma_store) {
+ crtc->gamma_size = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
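+
+/*
+ * The gamma_store allocated above is laid out as three consecutive
+ * arrays of gamma_size uint16_t entries -- red, then green, then blue --
+ * which is the layout the gamma set/get ioctls below rely on when
+ * deriving g_base and b_base from r_base. A sketch of indexing it
+ * directly:
+ *
+ * uint16_t *red = crtc->gamma_store;
+ * uint16_t *green = red + crtc->gamma_size;
+ * uint16_t *blue = green + crtc->gamma_size;
+ * uint16_t first_red = red[0];
+ */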
+
+/* LINTED */
+int drm_mode_gamma_set_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (DRM_COPY_FROM_USER(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = (void *)((uintptr_t)r_base + size);
+ if (DRM_COPY_FROM_USER(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = (void *)((uintptr_t)g_base + size);
+ if (DRM_COPY_FROM_USER(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_gamma_get_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (DRM_COPY_TO_USER((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = (void *)((uintptr_t)r_base + size);
+ if (DRM_COPY_TO_USER((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = (void *)((uintptr_t)g_base + size);
+ if (DRM_COPY_TO_USER((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_page_flip_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_crtc_page_flip *page_flip = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+ int hdisplay, vdisplay;
+ int ret = -EINVAL;
+
+ if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ page_flip->reserved != 0)
+ return -EINVAL;
+
+ obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj)
+ return -EINVAL;
+ crtc = obj_to_crtc(obj);
+
+ mutex_lock(&crtc->mutex);
+ if (crtc->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+ * due to a hotplug event that userspace has not
+ * yet discovered.
+ */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (crtc->funcs->page_flip == NULL)
+ goto out;
+
+ fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ if (!fb)
+ goto out;
+
+ hdisplay = crtc->mode.hdisplay;
+ vdisplay = crtc->mode.vdisplay;
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc->x > fb->width - hdisplay ||
+ crtc->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ if (crtc->fb->pixel_format != fb->pixel_format) {
+ DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ ret = -ENOMEM;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (file->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+ file->event_space -= sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = page_flip->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy =
+ (void (*) (void *, size_t)) kfree;
+ }
+
+ old_fb = crtc->fb;
+ ret = crtc->funcs->page_flip(crtc, fb, e);
+ if (ret) {
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e, sizeof(*e));
+ }
+ /* Keep the old fb, don't unref it. */
+ old_fb = NULL;
+ } else {
+ /*
+ * Warn if the driver hasn't properly updated the crtc->fb
+ * field to reflect that the new framebuffer is now used.
+ * Failing to do so will screw with the reference counting
+ * on framebuffers.
+ */
+ WARN_ON(crtc->fb != fb);
+ /* Unref only the old framebuffer. */
+ fb = NULL;
+ }
+
+out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ mutex_unlock(&crtc->mutex);
+
+ return ret;
+}
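+
+/*
+ * Example (an illustrative sketch): when DRM_MODE_PAGE_FLIP_EVENT is set,
+ * userspace waits for the DRM_EVENT_FLIP_COMPLETE event queued above by
+ * reading the DRM fd once it polls readable. The event struct names come
+ * from the DRM headers; the buffer size and handle_flip() callback are
+ * arbitrary choices.
+ *
+ * char buf[1024];
+ * int len = read(fd, buf, sizeof (buf));
+ * struct drm_event *e = (struct drm_event *)(void *)buf;
+ * if (len >= (int)sizeof (*e) && e->type == DRM_EVENT_FLIP_COMPLETE) {
+ * struct drm_event_vblank *vb = (struct drm_event_vblank *)(void *)e;
+ * handle_flip(vb->user_data);
+ * }
+ *
+ * Here vb->user_data carries page_flip->user_data from the request.
+ */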
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_create_dumb_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_create_dumb *args = data;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ return dev->driver->dumb_create(file, dev, args);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_mmap_dumb_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file, dev, args->handle, &args->offset);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_destroy_dumb_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file, dev, args->handle);
+}
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format\n");
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
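+
+/*
+ * Taken together, the helpers above are enough to size one plane of a
+ * buffer. A minimal sketch, assuming a linear layout with no extra
+ * pitch alignment:
+ *
+ * static uint64_t
+ * plane_size(uint32_t format, int plane, uint32_t width, uint32_t height)
+ * {
+ * uint32_t w = plane ? width / drm_format_horz_chroma_subsampling(format) : width;
+ * uint32_t h = plane ? height / drm_format_vert_chroma_subsampling(format) : height;
+ * return ((uint64_t)w * drm_format_plane_cpp(format, plane) * h);
+ * }
+ *
+ * For 1920x1080 NV12 this gives 1920 * 1080 bytes for the Y plane and
+ * 960 * 540 * 2 bytes for the interleaved CbCr plane.
+ */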
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks themselves, no locking is done
+ * here. That is fine, because this should happen single-threaded at init
+ * time; it is the driver's job to ensure that guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&dev->mode_config.idr_mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&dev->mode_config.fb_lock, NULL, MUTEX_DRIVER, NULL);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
+
+ drm_modeset_lock_all(dev);
+ (void) drm_mode_create_standard_connector_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+}
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_property_blob *blob, *bt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, struct drm_encoder, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot, struct drm_connector,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, struct drm_property, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(blob, bt, struct drm_property_blob, &dev->mode_config.property_blob_list,
+ head) {
+ drm_property_destroy_blob(dev, blob);
+ }
+
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+	 * fb_lock to protect against concurrent fb_list access. On the contrary,
+	 * doing so would actually deadlock with the drm_framebuffer_cleanup
+	 * function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, struct drm_framebuffer, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_remove(fb);
+ }
+
+ list_for_each_entry_safe(plane, plt, struct drm_plane, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+ idr_remove_all(&dev->mode_config.crtc_idr);
+ idr_destroy(&dev->mode_config.crtc_idr);
+}
diff --git a/usr/src/uts/common/io/drm/drm_crtc_helper.c b/usr/src/uts/common/io/drm/drm_crtc_helper.c
new file mode 100644
index 0000000..50eb0be
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_crtc_helper.c
@@ -0,0 +1,1084 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "drm_edid.h"
+
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front of
+ * the connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+ struct list_head *tmp2;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp, struct drm_connector,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list, (caddr_t)connector);
+ }
+ tmp2 = dev->mode_config.connector_list.next;
+ list_splice(&panel_list, &dev->mode_config.connector_list, tmp2);
+}
+
+static bool drm_kms_helper_poll = true;
+
+static void drm_mode_validate_flag(struct drm_connector *connector,
+ int flags)
+{
+ struct drm_display_mode *mode;
+
+ if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+ return;
+
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ !(flags & DRM_MODE_FLAG_INTERLACE))
+ mode->status = MODE_NO_INTERLACE;
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+ !(flags & DRM_MODE_FLAG_DBLSCAN))
+ mode->status = MODE_NO_DBLESCAN;
+ }
+
+ return;
+}
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to detect modes on @connector. Newly found modes will first be added to
+ * the connector's probed_modes list, then culled (based on validity and the
+ * @maxX, @maxY parameters) and put into the normal modes list.
+ *
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ * FIXME: take into account monitor limits
+ *
+ * RETURNS:
+ * Number of modes found on @connector.
+ */
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ int count = 0;
+ int mode_flags = 0;
+ bool verbose_prune = true;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ drm_get_connector_name(connector));
+ /* set all modes to the unverified state */
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head)
+ mode->status = MODE_UNVERIFIED;
+
+ if (connector->force) {
+ if (connector->force == DRM_FORCE_ON)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+ if (connector->funcs->force)
+ connector->funcs->force(connector);
+ } else {
+ connector->status = connector->funcs->detect(connector, true);
+ }
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
+ if (connector->status == connector_status_disconnected) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, drm_get_connector_name(connector));
+ (void) drm_mode_connector_update_edid_property(connector, NULL);
+ verbose_prune = false;
+ goto prune;
+ }
+
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+ count = drm_load_edid_firmware(connector);
+ if (count == 0)
+#endif
+ count = (*connector_funcs->get_modes)(connector);
+
+ if (count == 0 && connector->status == connector_status_connected)
+ count = drm_add_modes_noedid(connector, 1024, 768);
+ if (count == 0)
+ goto prune;
+
+ drm_mode_connector_list_update(connector);
+
+ if (maxX && maxY)
+ drm_mode_validate_size(dev, &connector->modes, maxX,
+ maxY, 0);
+
+ if (connector->interlace_allowed)
+ mode_flags |= DRM_MODE_FLAG_INTERLACE;
+ if (connector->doublescan_allowed)
+ mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ drm_mode_validate_flag(connector, mode_flags);
+
+	list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ if (mode->status == MODE_OK)
+ mode->status = connector_funcs->mode_valid(connector,
+ mode);
+ }
+
+prune:
+ drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
+
+ if (list_empty(&connector->modes))
+ return 0;
+
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head)
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_sort(&connector->modes);
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes:\n", connector->base.id,
+			drm_get_connector_name(connector));
+	list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ return count;
+}
+
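+/*
+ * Illustrative sketch (not part of the original import): this probe helper
+ * drives the connector's helper vtable (get_modes, mode_valid and
+ * best_encoder, all dereferenced in this file). A driver typically supplies
+ * something like the following; the foo_* names are hypothetical:
+ *
+ *	static struct drm_connector_helper_funcs foo_connector_helper_funcs = {
+ *		.get_modes = foo_connector_get_modes,
+ *		.mode_valid = foo_connector_mode_valid,
+ *		.best_encoder = foo_connector_best_encoder,
+ *	};
+ */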
+
+/**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoder's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @encoder is part of the mode_config, false otherwise.
+ */
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return true;
+ return false;
+}
+
+/**
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is part of the mode_config, false otherwise.
+ */
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+ /* FIXME: Locking around list access? */
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
+ return true;
+ return false;
+}
+
+static void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+ if (encoder_funcs->disable)
+ (*encoder_funcs->disable)(encoder);
+ else
+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->status == connector_status_disconnected)
+ connector->encoder = NULL;
+ }
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ if (!drm_helper_encoder_in_use(encoder)) {
+ drm_encoder_disable(encoder);
+			/* disconnect the encoder from any connector */
+ encoder->crtc = NULL;
+ }
+ }
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled) {
+ if (crtc_funcs->disable)
+ (*crtc_funcs->disable)(crtc);
+ else
+ (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+ crtc->fb = NULL;
+ }
+ }
+}
+
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ if (crtc == NULL)
+ DRM_ERROR("checking null crtc?\n");
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
+
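+/*
+ * Illustrative note (not part of the original import): possible_crtcs is a
+ * bitmask indexed by each CRTC's position in crtc_list, which is exactly
+ * what the loop above recomputes. An encoder restricted to the first two
+ * CRTCs would be set up roughly as:
+ *
+ *	encoder->possible_crtcs = (1 << 0) | (1 << 1);
+ */
+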
+/*
+ * Check the CRTC we're going to map each output to vs. its current
+ * CRTC. If they don't match, we have to disable the output and the CRTC
+ * since the driver will have to re-route things.
+ */
+static void
+drm_crtc_prepare_encoders(struct drm_device *dev)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ encoder_funcs = encoder->helper_private;
+ /* Disable unused encoders */
+ if (encoder->crtc == NULL)
+ drm_encoder_disable(encoder);
+ /* Disable encoders whose CRTC is about to change */
+ if (encoder_funcs->get_crtc &&
+ encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
+ drm_encoder_disable(encoder);
+ }
+}
+
+/**
+ * drm_crtc_helper_set_mode - internal helper to set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on a hdmi link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ int saved_x, saved_y;
+ struct drm_encoder *encoder;
+ bool ret = true;
+
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled)
+ return true;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return false;
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+ saved_x = crtc->x;
+ saved_y = crtc->y;
+
+ /* Update crtc values up front so the driver can rely on them for mode
+ * setting.
+ */
+ crtc->mode = *mode;
+ crtc->x = x;
+ crtc->y = y;
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto done;
+ }
+ }
+
+ if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto done;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ /* Prepare the encoders and CRTCs before setting the mode. */
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ /* Disable the encoders as the first thing we do. */
+ encoder_funcs->prepare(encoder);
+ }
+
+ drm_crtc_prepare_encoders(dev);
+
+ crtc_funcs->prepare(crtc);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
+ */
+ ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+ if (!ret)
+ goto done;
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ crtc_funcs->commit(crtc);
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->commit(encoder);
+
+ }
+
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ crtc->x = saved_x;
+ crtc->y = saved_y;
+ }
+
+ return ret;
+}
+
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
+/**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Set up a new configuration, provided by the upper layers (either an ioctl
+ * call from userspace or internally e.g. from the fbdev support code) in
+ * @set, and enable it. This is the main helper function for drivers that
+ * implement kernel mode setting with the crtc helper functions and the
+ * assorted ->prepare(), ->mode_set() and ->commit() helper callbacks.
+ *
+ * RETURNS:
+ * Zero. (FIXME)
+ */
+int drm_crtc_helper_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+ struct drm_encoder *save_encoders, *new_encoder, *encoder;
+ struct drm_framebuffer *old_fb = NULL;
+ bool mode_changed = false; /* if true do a full mode set */
+ bool fb_changed = false; /* if true and !mode_changed just do a flip */
+ struct drm_connector *save_connectors, *connector;
+ int count = 0, ro, fail = 0;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_mode_set save_set;
+ int ret;
+ int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
+
+ crtc_funcs = set->crtc->helper_private;
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ return drm_crtc_helper_disable(set->crtc);
+ }
+
+ dev = set->crtc->dev;
+
+ /* Allocate space for the backup of all (non-pointer) crtc, encoder and
+ * connector data. */
+ save_crtcs = kzalloc(dev->mode_config.num_crtc *
+ sizeof(struct drm_crtc), GFP_KERNEL);
+ if (!save_crtcs)
+ return -ENOMEM;
+
+ save_encoders = kzalloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_encoder), GFP_KERNEL);
+ if (!save_encoders) {
+ kfree(save_crtcs, dev->mode_config.num_crtc * sizeof(struct drm_crtc));
+ return -ENOMEM;
+ }
+
+ save_connectors = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_connector), GFP_KERNEL);
+ if (!save_connectors) {
+ kfree(save_crtcs, dev->mode_config.num_crtc * sizeof(struct drm_crtc));
+ kfree(save_encoders, dev->mode_config.num_encoder * sizeof(struct drm_encoder));
+ return -ENOMEM;
+ }
+
+ /* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen, only the expected state is
+	 * restored, not the driver's personal bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ save_crtcs[count++] = *crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ save_encoders[count++] = *encoder;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ save_connectors[count++] = *connector;
+ }
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ mode_changed = true;
+ } else if (set->fb == NULL) {
+ mode_changed = true;
+ } else if (set->fb->pixel_format !=
+ set->crtc->fb->pixel_format) {
+ mode_changed = true;
+ } else
+ fb_changed = true;
+ }
+
+ if (set->x != set->crtc->x || set->y != set->crtc->y)
+ fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ mode_changed = true;
+ }
+
+ /* a) traverse passed in connector list and get encoders for them */
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ new_encoder = connector->encoder;
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector) {
+ new_encoder = connector_funcs->best_encoder(connector);
+				/* if we can't get an encoder for a connector
+				   we are setting up now, then fail */
+				if (new_encoder == NULL)
+					/* don't break so the fail path works correctly */
+ fail = 1;
+ break;
+/*
+ if (connector->dpms != DRM_MODE_DPMS_ON) {
+ DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+ mode_changed = true;
+ }
+*/
+ }
+ }
+
+ if (new_encoder != connector->encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ mode_changed = true;
+ /* If the encoder is reused for another connector, then
+ * the appropriate crtc will be set later.
+ */
+ if (connector->encoder)
+ connector->encoder->crtc = NULL;
+ connector->encoder = new_encoder;
+ }
+ }
+
+ if (fail) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+
+ if (connector->encoder->crtc == set->crtc)
+ new_crtc = NULL;
+ else
+ new_crtc = connector->encoder->crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (new_crtc &&
+ !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (new_crtc != connector->encoder->crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ mode_changed = true;
+ connector->encoder->crtc = new_crtc;
+ }
+ if (new_crtc) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ new_crtc->base.id);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, drm_get_connector_name(connector));
+ }
+ }
+
+ /* mode_set_base is not a required function */
+ if (fb_changed && !crtc_funcs->mode_set_base)
+ mode_changed = true;
+
+ if (mode_changed) {
+ set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+ if (set->crtc->enabled) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
+ if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+ set->x, set->y,
+ old_fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ set->crtc->fb = old_fb;
+ ret = -EINVAL;
+ goto fail;
+ }
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+ }
+ }
+ drm_helper_disable_unused_functions(dev);
+ } else if (fb_changed) {
+ set->crtc->x = set->x;
+ set->crtc->y = set->y;
+
+ old_fb = set->crtc->fb;
+ if (set->crtc->fb != set->fb)
+ set->crtc->fb = set->fb;
+ ret = crtc_funcs->mode_set_base(set->crtc,
+ set->x, set->y, old_fb);
+ if (ret != 0) {
+ set->crtc->fb = old_fb;
+ goto fail;
+ }
+ }
+
+ kfree(save_connectors, dev->mode_config.num_connector * sizeof(struct drm_connector));
+ kfree(save_encoders, dev->mode_config.num_encoder * sizeof(struct drm_encoder));
+ kfree(save_crtcs, dev->mode_config.num_crtc * sizeof(struct drm_crtc));
+ return 0;
+
+fail:
+ /* Restore all previous data. */
+ count = 0;
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ *crtc = save_crtcs[count++];
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ *encoder = save_encoders[count++];
+ }
+
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ *connector = save_connectors[count++];
+ }
+
+ /* Try to restore the config */
+ if (mode_changed &&
+ !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+ save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+ kfree(save_connectors, dev->mode_config.num_connector * sizeof(struct drm_connector));
+ kfree(save_encoders, dev->mode_config.num_encoder * sizeof(struct drm_encoder));
+ kfree(save_crtcs, dev->mode_config.num_crtc * sizeof(struct drm_crtc));
+ return ret;
+}
+
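+/*
+ * Illustrative sketch (not part of the original import): callers hand this
+ * function a fully populated struct drm_mode_set. Re-applying a CRTC's
+ * current configuration on a single connector might look like this, where
+ * "connector" is a hypothetical struct drm_connector pointer:
+ *
+ *	struct drm_mode_set set;
+ *
+ *	set.crtc = crtc;
+ *	set.mode = &crtc->mode;
+ *	set.x = crtc->x;
+ *	set.y = crtc->y;
+ *	set.fb = crtc->fb;
+ *	set.connectors = &connector;
+ *	set.num_connectors = 1;
+ *	(void) drm_crtc_helper_set_config(&set);
+ */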
+
+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
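+/*
+ * Illustrative note (not part of the original import): the "<" comparisons
+ * above work because the DPMS levels in drm_mode.h are ordered from
+ * most-on to most-off:
+ *
+ *	DRM_MODE_DPMS_ON == 0
+ *	DRM_MODE_DPMS_STANDBY == 1
+ *	DRM_MODE_DPMS_SUSPEND == 2
+ *	DRM_MODE_DPMS_OFF == 3
+ *
+ * so each helper picks the most-awake state requested by any connector.
+ */
+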
+/**
+ * drm_helper_connector_dpms - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
+ */
+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ int old_dpms;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* from off to on, do crtc then encoder */
+ if (mode < old_dpms) {
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ }
+
+ /* from on to off, do encoder then crtc */
+ if (mode > old_dpms) {
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+
+ return;
+}
+
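+/*
+ * Illustrative sketch (not part of the original import): drivers that use
+ * the CRTC helpers typically wire this function straight into their
+ * connector vtable; foo_connector_funcs is a hypothetical name:
+ *
+ *	static struct drm_connector_funcs foo_connector_funcs = {
+ *		.dpms = drm_helper_connector_dpms,
+ *		...
+ *	};
+ */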
+
+int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int i;
+
+ fb->width = mode_cmd->width;
+ fb->height = mode_cmd->height;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
+
+ return 0;
+}
+
+void drm_helper_resume_force_mode(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ int ret;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+
+ if (!crtc->enabled)
+ continue;
+
+ ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+
+		if (!ret)
+ DRM_ERROR("failed to set mode on crtc %p\n", (void *)crtc);
+
+ /* Turn off outputs that were already powered off */
+ if (drm_helper_choose_crtc_dpms(crtc)) {
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+			if (encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+ /* disable the unused connectors while restoring the modesetting */
+ drm_helper_disable_unused_functions(dev);
+}
+
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+
+#define DRM_OUTPUT_POLL_PERIOD (10 * DRM_HZ)
+
+static void
+output_poll_execute(struct work_struct *work)
+{
+ struct drm_device *dev = container_of(work, struct drm_device,
+ output_poll_work);
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool repoll = false, changed = false;
+
+ if (!drm_kms_helper_poll)
+ return;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+
+ /* Ignore forced connectors. */
+ if (connector->force)
+ continue;
+
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
+
+ old_status = connector->status;
+		/* if we are connected and don't want to poll for disconnect,
+		   skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+ if (old_status != connector->status) {
+ const char *old, *new;
+
+ old = drm_get_connector_status_name(old_status);
+ new = drm_get_connector_status_name(connector->status);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+ "status updated from %s to %s\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old, new);
+
+ changed = true;
+ }
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+
+ if (repoll)
+ test_set_timer(&dev->output_poll_timer, DRM_OUTPUT_POLL_PERIOD);
+}
+
+void
+output_poll_execute_timer(void *device)
+{
+ struct drm_device *dev = (struct drm_device *)device;
+ (void) queue_work(dev->drm_wq, &dev->output_poll_work);
+}
+
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ del_timer_sync(&dev->output_poll_timer);
+}
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled)
+ poll = true;
+ }
+
+ if (poll)
+ test_set_timer(&dev->output_poll_timer, DRM_OUTPUT_POLL_PERIOD);
+}
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ INIT_WORK(&dev->output_poll_work, output_poll_execute);
+ setup_timer(&dev->output_poll_timer, output_poll_execute_timer,
+ (void *)dev);
+
+ dev->mode_config.poll_enabled = true;
+
+ drm_kms_helper_poll_enable(dev);
+}
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+}
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ /* kill timer and schedule immediate execution, this doesn't block */
+ del_timer_sync(&dev->output_poll_timer);
+ cancel_delayed_work(dev->drm_wq);
+ if (drm_kms_helper_poll)
+ test_set_timer(&dev->output_poll_timer, 0);
+}
+
diff --git a/usr/src/uts/common/io/drm/drm_dma.c b/usr/src/uts/common/io/drm/drm_dma.c
index 589c486..54d7981 100644
--- a/usr/src/uts/common/io/drm/drm_dma.c
+++ b/usr/src/uts/common/io/drm/drm_dma.c
@@ -1,8 +1,22 @@
/*
- * drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*/
+
/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -25,109 +39,121 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
*/
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
-int
-drm_dma_setup(drm_device_t *dev)
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(struct drm_device *dev)
{
int i;
- drm_buf_entry_t *pbuf;
- dev->dma = drm_calloc(1, sizeof (*dev->dma), DRM_MEM_DMA);
- if (dev->dma == NULL)
- return (ENOMEM);
+ dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+ if (!dev->dma)
+ return -ENOMEM;
+
+ (void) memset(dev->dma, 0, sizeof(*dev->dma));
+
+ for (i = 0; i <= DRM_MAX_ORDER; i++)
+ (void) memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
mutex_init(&dev->dma_lock, NULL, MUTEX_DRIVER, NULL);
- pbuf = &(dev->dma->bufs[0]);
- for (i = 0; i <= DRM_MAX_ORDER; i++, pbuf++)
- bzero(pbuf, sizeof (drm_buf_entry_t));
- return (0);
+ return 0;
}
-void
-drm_dma_takedown(drm_device_t *dev)
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(struct drm_device *dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i, j;
- if (dma == NULL)
+ if (!dma)
return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
- drm_free(dma->bufs[i].seglist,
- dma->bufs[i].seg_count *
- sizeof (*dma->bufs[0].seglist), DRM_MEM_SEGS);
+ DRM_DEBUG("order %d: buf_count = %d,"
+ " seg_count = %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].seg_count);
+ kfree(dma->bufs[i].seglist,
+ dma->bufs[i].seg_count * sizeof (*dma->bufs[0].seglist));
}
-
- for (j = 0; j < dma->bufs[i].buf_count; j++) {
- if (dma->bufs[i].buflist[j].dev_private) {
- drm_free(dma->bufs[i].buflist[j].dev_private,
- dma->bufs[i].buflist[j].dev_priv_size,
- DRM_MEM_BUFS);
+ if (dma->bufs[i].buf_count) {
+ for (j = 0; j < dma->bufs[i].buf_count; j++) {
+ if (dma->bufs[i].buflist[j].dev_private) {
+ kfree(dma->bufs[i].buflist[j].dev_private,
+ dma->bufs[i].buflist[j].dev_priv_size);
+ }
}
+ kfree(dma->bufs[i].buflist,
+ dma->bufs[i].buf_count * sizeof(*dma->bufs[0].buflist));
}
- if (dma->bufs[i].buf_count)
- drm_free(dma->bufs[i].buflist,
- dma->bufs[i].buf_count *
- sizeof (*dma->bufs[0].buflist), DRM_MEM_BUFS);
- }
- if (dma->buflist) {
- drm_free(dma->buflist,
- dma->buf_count *sizeof (*dma->buflist),
- DRM_MEM_BUFS);
- }
-
- if (dma->pagelist) {
- drm_free(dma->pagelist,
- dma->page_count *sizeof (*dma->pagelist),
- DRM_MEM_PAGES);
}
- drm_free(dev->dma, sizeof (*dev->dma), DRM_MEM_DRIVER);
+ if (dma->buflist)
+ kfree(dma->buflist, dma->buf_count * sizeof(*dma->buflist));
+ if (dma->pagelist)
+ kfree(dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+ kfree(dev->dma, sizeof(*dev->dma));
dev->dma = NULL;
+
mutex_destroy(&dev->dma_lock);
}
-
-/*ARGSUSED*/
-void
-drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+/* LINTED */
+void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
{
if (!buf)
return;
- buf->pending = 0;
- buf->filp = NULL;
- buf->used = 0;
+ buf->pending = 0;
+ buf->file_priv = NULL;
+ buf->used = 0;
}
-void
-drm_reclaim_buffers(drm_device_t *dev, drm_file_t *fpriv)
+/**
+ * Reclaim the buffers.
+ *
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->filp == fpriv) {
+ if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
@@ -143,15 +169,3 @@ drm_reclaim_buffers(drm_device_t *dev, drm_file_t *fpriv)
}
}
-/* Call into the driver-specific DMA handler */
-int
-drm_dma(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
-
- if (dev->driver->dma_ioctl) {
- return (dev->driver->dma_ioctl(dev, data, fpriv, mode));
- } else {
- return (EINVAL);
- }
-}
diff --git a/usr/src/uts/common/io/drm/drm_dp_helper.c b/usr/src/uts/common/io/drm/drm_dp_helper.c
new file mode 100644
index 0000000..9cb2fc4
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_dp_helper.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include "drm_dp_helper.h"
+#include "drmP.h"
+
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
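+/*
+ * Illustrative sketch (not part of the original import): during link
+ * training, a source reads DP_LINK_STATUS_SIZE bytes starting at
+ * DP_LANE0_1_STATUS from the sink's DPCD and feeds them to the two checks
+ * above; read_dpcd() stands in for the driver's AUX read routine:
+ *
+ *	u8 link_status[DP_LINK_STATUS_SIZE];
+ *
+ *	read_dpcd(DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE);
+ *	if (!drm_dp_clock_recovery_ok(link_status, lane_count))
+ *		... raise the voltage swing and retry ...
+ *	else if (!drm_dp_channel_eq_ok(link_status, lane_count))
+ *		... adjust pre-emphasis and retry ...
+ */
+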
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(100);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(400);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+
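+/*
+ * Illustrative note (not part of the original import): the two helpers
+ * above are inverses of each other, converting between a link rate in kHz
+ * and the DPCD bandwidth code, e.g.:
+ *
+ *	drm_dp_link_rate_to_bw_code(270000) == DP_LINK_BW_2_7
+ *	drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4) == 540000
+ */
+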
+int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
diff --git a/usr/src/uts/common/io/drm/drm_dp_i2c_helper.c b/usr/src/uts/common/io/drm/drm_dp_i2c_helper.c
new file mode 100644
index 0000000..31350c1
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_dp_i2c_helper.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include "drm_dp_helper.h"
+#include "drmP.h"
+
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for doing
+ * a write followed by a read (as needed for DDC).
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
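+/*
+ * Illustrative sketch (not part of the original import): a DDC EDID fetch
+ * through this transfer hook is a one-byte write of the EDID offset to I2C
+ * address 0x50 followed by a 128-byte block read, roughly:
+ *
+ *	u8 offset = 0;
+ *	u8 edid[128];
+ *	struct i2c_msg msgs[] = {
+ *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
+ *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
+ *	};
+ *
+ *	ret = i2c_algo_dp_aux_xfer(adapter, msgs, 2);
+ */
+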
+static u32
+/* LINTED E_FUNC_ARG_UNUSED */
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return 0;
+}
+
+static struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+	/* error = i2c_add_adapter(adapter); */
+ return error;
+}
diff --git a/usr/src/uts/common/io/drm/drm_drawable.c b/usr/src/uts/common/io/drm/drm_drawable.c
deleted file mode 100644
index 3ccc443..0000000
--- a/usr/src/uts/common/io/drm/drm_drawable.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- */
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
-
-/*ARGSUSED*/
-int
-drm_adddraw(DRM_IOCTL_ARGS)
-{
- drm_draw_t draw;
-
- draw.handle = 0; /* NOOP */
- DRM_DEBUG("draw.handle = %d\n", draw.handle);
-
- DRM_COPYTO_WITH_RETURN((void *)data, &draw, sizeof (draw));
-
- return (0);
-}
-
-/*ARGSUSED*/
-int
-drm_rmdraw(DRM_IOCTL_ARGS)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-drm_drawable_info_t *
-drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
- return (NULL);
-}
-
-/*ARGSUSED*/
-int
-drm_update_draw(DRM_IOCTL_ARGS)
-{
- DRM_DEBUG("drm_update_draw\n");
- return (0);
-}
diff --git a/usr/src/uts/common/io/drm/drm_drv.c b/usr/src/uts/common/io/drm/drm_drv.c
index ae86db5..f992c16 100644
--- a/usr/src/uts/common/io/drm/drm_drv.c
+++ b/usr/src/uts/common/io/drm/drm_drv.c
@@ -1,11 +1,15 @@
/*
+ * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
* drm_drv.h -- Generic driver template -*- linux-c -*-
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*/
/*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -33,545 +37,640 @@
*
*/
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
#include "drm.h"
+#include "drmP.h"
+#include "drm_core.h"
+#include "drm_io32.h"
#include "drm_sarea.h"
-
-int drm_debug_flag = 1;
-
-#define DRIVER_IOCTL_COUNT 256
-drm_ioctl_desc_t drm_ioctls[DRIVER_IOCTL_COUNT] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] =
- {drm_version, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] =
- {drm_getunique, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] =
- {drm_getmagic, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] =
- {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] =
- {drm_getmap, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] =
- {drm_getclient, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] =
- {drm_getstats, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] =
- {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODESET_CTL)] =
- {drm_modeset_ctl, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GEM_CLOSE)] =
- {drm_gem_close_ioctl, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GEM_FLINK)] =
- {drm_gem_flink_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_GEM_OPEN)] =
- {drm_gem_open_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] =
- {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] =
- {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] =
- {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] =
- {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] =
- {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] =
- {drm_rmmap_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] =
- {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] =
- {drm_getsareactx, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] =
- {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] =
- {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] =
- {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] =
- {drm_getctx, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] =
- {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] =
- {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] =
- {drm_resctx, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] =
- {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] =
- {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] =
- {drm_lock, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] =
- {drm_unlock, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] =
- {drm_noop, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] =
- {drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] =
- {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] =
- {drm_infobufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] =
- {drm_mapbufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] =
- {drm_freebufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] =
- {drm_dma, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] =
- {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] =
- {drm_agp_acquire, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] =
- {drm_agp_release, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] =
- {drm_agp_enable, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] =
- {drm_agp_info, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] =
- {drm_agp_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] =
- {drm_agp_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] =
- {drm_agp_bind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] =
- {drm_agp_unbind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] =
- {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] =
- {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] =
- {drm_wait_vblank, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] =
- {drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+#include "drm_linux_list.h"
+
+static int drm_version(DRM_IOCTL_ARGS);
+
+/** Ioctl table */
+static struct drm_ioctl_desc drm_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED, copyin32_drm_version, copyout32_drm_version),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0, copyin32_drm_unique, copyout32_drm_unique),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED, copyin32_drm_map, copyout32_drm_map),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED, copyin32_drm_client, copyout32_drm_client),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED, NULL, copyout32_drm_stats),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_map, copyout32_drm_map),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH, copyin32_drm_map, copyout32_drm_map),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_ctx_priv_map, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH, copyin32_drm_ctx_priv_map, copyout32_drm_ctx_priv_map),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH, copyin32_drm_ctx_res, copyout32_drm_ctx_res),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_buf_desc, copyout32_drm_buf_desc),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH, copyin32_drm_buf_map, copyout32_drm_buf_map),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH, copyin32_drm_buf_free, NULL),
+ /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_scatter_gather, copyout32_drm_scatter_gather),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_scatter_gather, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED, copyin32_drm_wait_vblank, copyout32_drm_wait_vblank),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL)
};
-extern void idr_list_free(struct idr_list *head);
+#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
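+
+/*
+ * Core ioctls occupy numbers below DRM_COMMAND_BASE; driver-private
+ * ioctls occupy [DRM_COMMAND_BASE, DRM_COMMAND_END) and are dispatched
+ * through dev->driver->ioctls in drm_ioctl().
+ */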
-const char *
-drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist)
-{
- int i = 0;
- for (i = 0; idlist[i].vendor != 0; i++) {
- if ((idlist[i].vendor == vendor) &&
- (idlist[i].device == device)) {
- return (idlist[i].name);
- }
- }
- return ((char *)NULL);
-}
-
-static int
-drm_firstopen(drm_device_t *dev)
-{
- int i;
- int retval;
- drm_local_map_t *map;
-
- /* prebuild the SAREA */
- retval = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
- _DRM_CONTAINS_LOCK, &map);
- if (retval != 0) {
- DRM_ERROR("firstopen: failed to prebuild SAREA");
- return (retval);
- }
-
- if (dev->driver->use_agp) {
- DRM_DEBUG("drm_firstopen: use_agp=%d", dev->driver->use_agp);
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (dev->driver->require_agp && dev->agp == NULL) {
- DRM_ERROR("couldn't initialize AGP");
- return (EIO);
- }
- }
-
- if (dev->driver->firstopen)
- retval = dev->driver->firstopen(dev);
-
- if (retval != 0) {
- DRM_ERROR("drm_firstopen: driver-specific firstopen failed");
- return (retval);
- }
-
- dev->buf_use = 0;
-
- if (dev->driver->use_dma) {
- i = drm_dma_setup(dev);
- if (i != 0)
- return (i);
- }
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
- dev->types[1] = _DRM_STAT_OPENS;
- dev->types[2] = _DRM_STAT_CLOSES;
- dev->types[3] = _DRM_STAT_IOCTLS;
- dev->types[4] = _DRM_STAT_LOCKS;
- dev->types[5] = _DRM_STAT_UNLOCKS;
-
- for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
- *(&dev->counts[i]) = 0;
-
- for (i = 0; i < DRM_HASH_SIZE; i++) {
- dev->magiclist[i].head = NULL;
- dev->magiclist[i].tail = NULL;
- }
-
- dev->irq_enabled = 0;
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-
- return (0);
-}
-
-/* Free resources associated with the DRM on the last close. */
-static int
-drm_lastclose(drm_device_t *dev)
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
{
- drm_magic_entry_t *pt, *next;
- drm_local_map_t *map, *mapsave;
- int i;
-
- DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+ DRM_DEBUG("\n");
- if (dev->driver->lastclose != NULL)
+ if (dev->driver->lastclose)
dev->driver->lastclose(dev);
+ DRM_DEBUG("driver lastclose completed\n");
- if (dev->irq_enabled)
+ if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
(void) drm_irq_uninstall(dev);
- if (dev->unique) {
- drm_free(dev->unique, dev->unique_len + 1, DRM_MEM_DRIVER);
- dev->unique = NULL;
- dev->unique_len = 0;
- }
-
- /* Clear pid list */
- for (i = 0; i < DRM_HASH_SIZE; i++) {
- for (pt = dev->magiclist[i].head; pt; pt = next) {
- next = pt->next;
- drm_free(pt, sizeof (*pt), DRM_MEM_MAGIC);
- }
- dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
- }
+ mutex_lock(&dev->struct_mutex);
/* Clear AGP information */
- if (dev->agp) {
- drm_agp_mem_t *entry;
- drm_agp_mem_t *nexte;
+ if (drm_core_has_AGP(dev) && dev->agp &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_agp_mem *entry, *tempe;
- /*
- * Remove AGP resources, but leave dev->agp
- * intact until drm_cleanup is called.
- */
- for (entry = dev->agp->memory; entry; entry = nexte) {
- nexte = entry->next;
+ /*
+ * Remove AGP resources, but leave dev->agp
+ * intact until drv_cleanup is called.
+ */
+ list_for_each_entry_safe(entry, tempe, struct drm_agp_mem, &dev->agp->memory, head) {
if (entry->bound)
- (void) drm_agp_unbind_memory(
- (unsigned long)entry->handle, dev);
- (void) drm_agp_free_memory(entry->handle, dev);
- drm_free(entry, sizeof (*entry), DRM_MEM_AGPLISTS);
+ (void) drm_agp_unbind_memory(entry->handle, dev);
+ kfree(entry, sizeof (*entry));
}
- dev->agp->memory = NULL;
+ INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
- (void) drm_agp_do_release(dev);
+ (void) drm_agp_release(dev);
dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- drm_agp_fini(dev);
+ dev->agp->enabled = 0;
}
-
- if (dev->sg != NULL) {
- drm_sg_mem_t *entry;
- entry = dev->sg;
+ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_sg_cleanup(dev->sg);
dev->sg = NULL;
- drm_sg_cleanup(dev, entry);
}
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_dma_takedown(dev);
- /* Clean up maps that weren't set up by the driver. */
- TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
- if (!map->kernel_owned)
- drm_rmmap(dev, map);
- }
+ mutex_unlock(&dev->struct_mutex);
- drm_dma_takedown(dev);
- if (dev->lock.hw_lock) {
- dev->lock.hw_lock = NULL; /* SHM removed */
- dev->lock.filp = NULL;
+ DRM_DEBUG("lastclose completed\n");
+ return 0;
+}
- mutex_enter(&(dev->lock.lock_mutex));
- cv_broadcast(&(dev->lock.lock_cv));
- mutex_exit(&(dev->lock.lock_mutex));
+/**
+ * Driver initialization, called by a DRM driver at attach time.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates the PCI device handle, matches the device against the driver's
+ * ID table, registers the device via drm_get_dev(), installs the common
+ * cb_ops, and initializes FMA support.
+ */
+int drm_init(struct drm_device *dev, struct drm_driver *driver)
+{
+ dev_info_t *dip = dev->devinfo;
+ struct pci_dev *pdev = NULL;
+ struct dev_ops *devop;
+ int ret, i;
+
+ DRM_DEBUG("\n");
+
+ pdev = pci_dev_create(dev);
+ if (!pdev)
+ return -EFAULT;
+
+ for (i = 0; driver->id_table[i].vendor != 0; i++) {
+ if ((driver->id_table[i].vendor == pdev->vendor) &&
+ (driver->id_table[i].device == pdev->device)) {
+ ret = drm_get_dev(dev, pdev, driver, driver->id_table[i].driver_data);
+ if (ret) {
+ pci_dev_destroy(pdev);
+ return ret;
+ }
+
+ /*
+ * DRM drivers are required to use common cb_ops
+ */
+ devop = ddi_get_driver(dev->devinfo);
+ if (devop->devo_cb_ops != &drm_cb_ops) {
+ devop->devo_cb_ops = &drm_cb_ops;
+ }
+ /*
+ * Initialize FMA (fault management) support
+ */
+ dev->drm_fm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "fm-capable",
+ DDI_FM_EREPORT_CAPABLE | DDI_FM_DMACHK_CAPABLE |
+ DDI_FM_ACCCHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
+
+ drm_fm_init(dev);
+ return 0;
+ }
}
- return (0);
+ pci_dev_destroy(pdev);
+ return -ENODEV;
}
-static int
-drm_load(drm_device_t *dev)
+void drm_exit(struct drm_device *dev)
{
- int retcode;
-
- cv_init(&(dev->lock.lock_cv), NULL, CV_DRIVER, NULL);
- mutex_init(&(dev->lock.lock_mutex), NULL, MUTEX_DRIVER, NULL);
- mutex_init(&(dev->dev_lock), "drmdev", MUTEX_DRIVER, NULL);
- mutex_init(&dev->irq_lock, "drmirq", MUTEX_DRIVER,
- (void *)dev->intr_block);
- mutex_init(&dev->drw_lock, "drmdrw", MUTEX_DRIVER, NULL);
- mutex_init(&dev->tasklet_lock, "drmtsk", MUTEX_DRIVER, NULL);
-
- dev->irq = pci_get_irq(dev);
- dev->pci_vendor = pci_get_vendor(dev);
- dev->pci_device = pci_get_device(dev);
-
- TAILQ_INIT(&dev->maplist);
- TAILQ_INIT(&dev->minordevs);
- TAILQ_INIT(&dev->files);
- if (dev->driver->load != NULL) {
- retcode = dev->driver->load(dev, 0);
- if (retcode != 0) {
- DRM_ERROR("drm_load: failed\n");
- goto error;
- }
- }
+ drm_put_dev(dev);
+ pci_dev_destroy(dev->pdev);
+ drm_fm_fini(dev);
+}
- retcode = drm_ctxbitmap_init(dev);
- if (retcode != 0) {
- DRM_ERROR("drm_load: Cannot allocate memory for ctx bitmap");
- goto error;
- }
+int __init drm_core_init(void)
+{
+ INIT_LIST_HEAD(&drm_iomem_list);
- if (dev->driver->use_gem == 1) {
- retcode = drm_gem_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot initialize graphics execution "
- "manager (GEM)\n");
- goto error;
- }
- }
+ idr_init(&drm_minors_idr);
- if (drm_init_kstats(dev)) {
- DRM_ERROR("drm_attach => drm_load: init kstats error");
- retcode = EFAULT;
- goto error;
- }
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+ return 0;
+}
- DRM_INFO("!drm: Initialized %s %d.%d.%d %s ",
- dev->driver->driver_name,
- dev->driver->driver_major,
- dev->driver->driver_minor,
- dev->driver->driver_patchlevel,
- dev->driver->driver_date);
- return (0);
-
-error:
- DRM_LOCK();
- (void) drm_lastclose(dev);
- DRM_UNLOCK();
- cv_destroy(&(dev->lock.lock_cv));
- mutex_destroy(&(dev->lock.lock_mutex));
- mutex_destroy(&dev->irq_lock);
- mutex_destroy(&(dev->dev_lock));
- mutex_destroy(&dev->drw_lock);
- mutex_destroy(&dev->tasklet_lock);
-
- return (retcode);
+void __exit drm_core_exit(void)
+{
+ idr_remove_all(&drm_minors_idr);
+ idr_destroy(&drm_minors_idr);
}
-/* called when cleanup this module */
-static void
-drm_unload(drm_device_t *dev)
+/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
{
- drm_local_map_t *map;
+ int len;
+
+ /* don't overflow userbuf */
+ len = strlen(value);
+ if (len > *buf_len)
+ len = *buf_len;
+
+ /* let userspace know the exact length of the driver value (which could
+ * be larger than the userspace-supplied buffer) */
+ *buf_len = strlen(value);
+
+ /* finally, try filling in the userbuf */
+ if (len && buf)
+ if (copy_to_user(buf, value, len))
+ return -EFAULT;
+ return 0;
+}
- drm_vblank_cleanup(dev);
+/**
+ * Get version information
+ *
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+/* LINTED */
+static int drm_version(DRM_IOCTL_ARGS)
+{
+ struct drm_version *version = data;
+ int err;
+
+ version->version_major = dev->driver->major;
+ version->version_minor = dev->driver->minor;
+ version->version_patchlevel = dev->driver->patchlevel;
+ err = drm_copy_field(version->name, &version->name_len,
+ dev->driver->name);
+ if (!err)
+ err = drm_copy_field(version->date, &version->date_len,
+ dev->driver->date);
+ if (!err)
+ err = drm_copy_field(version->desc, &version->desc_len,
+ dev->driver->desc);
+
+ return err;
+}
- drm_ctxbitmap_cleanup(dev);
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+int drm_ioctl(dev_t dev_id, struct drm_file *file_priv,
+ int cmd, intptr_t arg, int mode, cred_t *credp)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_ioctl_desc *ioctl;
+ drm_ioctl_t *func;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int retcode = -EINVAL;
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+
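+ /*
+ * Dispatch: bump the in-flight counters, look up the ioctl descriptor
+ * (core table or driver-private range), enforce its permission flags,
+ * copy the argument in, call the handler, then copy the result back out.
+ */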
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+ ++file_priv->ioctl_count;
+
+ if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+ goto err_i1;
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ usize = asize = _IOC_SIZE(cmd);
+ cmd = ioctl->cmd;
+ } else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+ ioctl = &drm_ioctls[nr];
+ cmd = ioctl->cmd;
+ usize = asize = _IOC_SIZE(cmd);
+ } else
+ goto err_i1;
+
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+ /* is there a local override? */
+ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+ func = dev->driver->dma_ioctl;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
+ ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+ ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
+ (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+ retcode = -EACCES;
+ } else {
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ (void) memset(kdata + usize, 0, asize - usize);
+ }
- if (dev->driver->use_gem == 1) {
- idr_list_free(&dev->object_name_idr);
- mutex_destroy(&dev->object_name_lock);
+ if (cmd & IOC_IN) {
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32 && ioctl->copyin32) {
+ if (ioctl->copyin32((void*)kdata, (void*)arg)) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else {
+#endif
+ if (DRM_COPY_FROM_USER(kdata, (void __user *)arg,
+ _IOC_SIZE(cmd)) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ }
+ retcode = func(dev_id, dev, kdata, file_priv, mode, credp);
+
+ if (cmd & IOC_OUT) {
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32 && ioctl->copyout32) {
+ if (ioctl->copyout32((void*)arg, (void*)kdata)) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else {
+#endif
+ if (DRM_COPY_TO_USER((void __user *)arg, kdata,
+ _IOC_SIZE(cmd)) != 0)
+ retcode = -EFAULT;
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ }
}
- DRM_LOCK();
- (void) drm_lastclose(dev);
- DRM_UNLOCK();
+err_i1:
+ if (kdata && (kdata != stack_kdata))
+ kfree(kdata, asize);
+ atomic_dec(&dev->ioctl_count);
+ if (retcode)
+ DRM_DEBUG("ret = %d\n", -retcode);
+ return retcode;
+}
+
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+ struct drm_map_list *entry;
- while ((map = TAILQ_FIRST(&dev->maplist)) != NULL) {
- drm_rmmap(dev, map);
+ list_for_each_entry(entry, struct drm_map_list, &dev->maplist, head) {
+ if (entry->map && entry->map->type == _DRM_SHM &&
+ (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+ return entry->map;
+ }
}
+ return NULL;
+}
- if (dev->driver->unload != NULL)
- dev->driver->unload(dev);
+void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ kref_get(&obj->refcount);
+}
- drm_mem_uninit();
- cv_destroy(&dev->lock.lock_cv);
- mutex_destroy(&dev->lock.lock_mutex);
- mutex_destroy(&dev->irq_lock);
- mutex_destroy(&dev->dev_lock);
- mutex_destroy(&dev->drw_lock);
- mutex_destroy(&dev->tasklet_lock);
+void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj != NULL)
+ kref_put(&obj->refcount, drm_gem_object_free);
+}
- dev->gtt_total = 0;
- atomic_set(&dev->pin_memory, 0);
- DRM_ERROR("drm_unload");
+void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (obj != NULL) {
+ struct drm_device *dev = obj->dev;
+ mutex_lock(&dev->struct_mutex);
+ kref_put(&obj->refcount, drm_gem_object_free);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
+void
+drm_gem_object_handle_reference(struct drm_gem_object *obj)
+{
+ drm_gem_object_reference(obj);
+ atomic_inc(&obj->handle_count);
+}
-/*ARGSUSED*/
-int
-drm_open(drm_device_t *dev, drm_cminor_t *mp, int openflags,
- int otyp, cred_t *credp)
+void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
- int retcode;
+ if (obj == NULL)
+ return;
+
+ if (atomic_read(&obj->handle_count) == 0)
+ return;
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+ if (atomic_dec_and_test(&obj->handle_count))
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_unreference(obj);
+}
- retcode = drm_open_helper(dev, mp, openflags, otyp, credp);
+void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
- if (!retcode) {
- atomic_inc_32(&dev->counts[_DRM_STAT_OPENS]);
- DRM_LOCK();
- if (!dev->open_count ++)
- retcode = drm_firstopen(dev);
- DRM_UNLOCK();
- }
+ if (atomic_read(&obj->handle_count) == 0)
+ return;
- return (retcode);
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+
+ if (atomic_dec_and_test(&obj->handle_count))
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_unreference_unlocked(obj);
}
-/*ARGSUSED*/
+
+/*
+ * PCI bus specific error callback
+ */
+/* ARGSUSED */
int
-drm_close(drm_device_t *dev, int minor, int flag, int otyp,
- cred_t *credp)
+drm_fm_error_cb(
+ dev_info_t *dip,
+ ddi_fm_error_t *err,
+ const void *impl)
{
- drm_cminor_t *mp;
- drm_file_t *fpriv;
- int retcode = 0;
-
- DRM_LOCK();
- mp = drm_find_file_by_minor(dev, minor);
- if (!mp) {
- DRM_UNLOCK();
- DRM_ERROR("drm_close: can't find authenticator");
- return (EACCES);
- }
- fpriv = mp->fpriv;
- ASSERT(fpriv);
+ pci_ereport_post(dip, err, NULL);
- if (--fpriv->refs != 0)
- goto done;
+ return (err->fme_status);
+}
- if (dev->driver->preclose != NULL)
- dev->driver->preclose(dev, fpriv);
+void
+drm_fm_init(struct drm_device *dev)
+{
+ dev_info_t *dip = dev->devinfo;
+ if (dev->drm_fm_cap) {
+ /* we do not care about the iblk cookie */
+ ddi_fm_init(dip, &dev->drm_fm_cap, NULL);
- /*
- * Begin inline drm_release
- */
- DRM_DEBUG("drm_close :pid = %d , open_count = %d",
- DRM_CURRENTPID, dev->open_count);
-
- if (dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.filp == fpriv) {
- DRM_DEBUG("Process %d dead, freeing lock for context %d",
- DRM_CURRENTPID,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- if (dev->driver->reclaim_buffers_locked != NULL)
- dev->driver->reclaim_buffers_locked(dev, fpriv);
- (void) drm_lock_free(dev, &dev->lock.hw_lock->lock,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- } else if (dev->driver->reclaim_buffers_locked != NULL &&
- dev->lock.hw_lock != NULL) {
- DRM_ERROR("drm_close: "
- "retake lock not implemented yet");
- }
+ /*
+ * Initialize pci ereport capabilities if ereport capable
+ */
+ if (DDI_FM_EREPORT_CAP(dev->drm_fm_cap) ||
+ DDI_FM_ERRCB_CAP(dev->drm_fm_cap)) {
+ pci_ereport_setup(dip);
+ }
- if (dev->driver->use_dma) {
- drm_reclaim_buffers(dev, fpriv);
+ /*
+ * Register error callback if error callback capable
+ */
+ if (DDI_FM_ERRCB_CAP(dev->drm_fm_cap)) {
+ ddi_fm_handler_register(dip,
+ drm_fm_error_cb, (void*) dev);
+ }
}
+}
- if (dev->driver->use_gem == 1) {
- drm_gem_release(dev, fpriv);
- }
+void
+drm_fm_fini(struct drm_device *dev)
+{
+ dev_info_t *dip = dev->devinfo;
+ if (dev->drm_fm_cap) {
+ if (DDI_FM_EREPORT_CAP(dev->drm_fm_cap) ||
+ DDI_FM_ERRCB_CAP(dev->drm_fm_cap)) {
+ pci_ereport_teardown(dip);
+ }
+
+ if (DDI_FM_ERRCB_CAP(dev->drm_fm_cap)) {
+ ddi_fm_handler_unregister(dip);
+ }
- if (dev->driver->postclose != NULL) {
- dev->driver->postclose(dev, fpriv);
+ ddi_fm_fini(dip);
}
- TAILQ_REMOVE(&dev->files, fpriv, link);
- drm_free(fpriv, sizeof (*fpriv), DRM_MEM_FILES);
+}
-done:
- atomic_inc_32(&dev->counts[_DRM_STAT_CLOSES]);
+/*
+ * Report a device error.
+ * detail - DDI_FM_DEVICE_*
+ */
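+/* e.g. drm_fm_ereport(dev, DDI_FM_DEVICE_INVAL_STATE); */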
+void
+drm_fm_ereport(
+ struct drm_device *dev,
+ char *detail)
+{
+ uint64_t ena;
+ char buf[FM_MAX_CLASS];
+ dev_info_t *dip = dev->devinfo;
- TAILQ_REMOVE(&dev->minordevs, mp, link);
- drm_free(mp, sizeof (*mp), DRM_MEM_FILES);
+ if (DDI_FM_EREPORT_CAP(dev->drm_fm_cap)) {
- if (--dev->open_count == 0) {
- retcode = drm_lastclose(dev);
- }
- DRM_UNLOCK();
+ (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE,
+ detail);
- return (retcode);
-}
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
-int
-drm_attach(drm_device_t *dev)
-{
- return (drm_load(dev));
+ ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
+ }
}
+/*
+ * Check for PIO (e.g. register) access errors.
+ */
int
-drm_detach(drm_device_t *dev)
+drm_check_acc_handle(
+ struct drm_device *dev,
+ ddi_acc_handle_t handle)
{
- drm_unload(dev);
- drm_fini_kstats(dev);
- return (DDI_SUCCESS);
-}
+ ddi_fm_error_t de;
+
+ if (!DDI_FM_ACC_ERR_CAP(dev->drm_fm_cap)) {
+ return (DDI_FM_OK);
-static int
-drm_get_businfo(drm_device_t *dev)
-{
- dev->irq = pci_get_irq(dev);
- if (dev->irq == -1) {
- DRM_ERROR("drm_get_businfo: get irq error");
- return (DDI_FAILURE);
- }
- /* XXX Fix domain number (alpha hoses) */
- dev->pci_domain = 0;
- if (pci_get_info(dev, &dev->pci_bus,
- &dev->pci_slot, &dev->pci_func) != DDI_SUCCESS) {
- DRM_ERROR("drm_get_businfo: get bus slot func error ");
- return (DDI_FAILURE);
}
- DRM_DEBUG("drm_get_businfo: pci bus: %d, pci slot :%d pci func %d",
- dev->pci_bus, dev->pci_slot, dev->pci_func);
- return (DDI_SUCCESS);
+
+ ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
+ ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
+
+ return (de.fme_status);
}
+/*
+ * Check for DMA access errors.
+ */
int
-drm_probe(drm_device_t *dev, drm_pci_id_list_t *idlist)
+drm_check_dma_handle(
+ struct drm_device *dev,
+ ddi_dma_handle_t handle)
{
- const char *s = NULL;
- int vendor, device;
-
- vendor = pci_get_vendor(dev);
- device = pci_get_device(dev);
-
- s = drm_find_description(vendor, device, idlist);
- if (s != NULL) {
- dev->desc = s;
- if (drm_get_businfo(dev) != DDI_SUCCESS) {
- DRM_ERROR("drm_probe: drm get bus info error");
- return (DDI_FAILURE);
- }
- return (DDI_SUCCESS);
+ ddi_fm_error_t de;
+
+ if (!DDI_FM_DMA_ERR_CAP(dev->drm_fm_cap)) {
+ return (DDI_FM_OK);
}
- return (DDI_FAILURE);
+
+ ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
+ ddi_fm_dma_err_clear(handle, DDI_FME_VERSION);
+
+ return (de.fme_status);
}
diff --git a/usr/src/uts/common/io/drm/drm_edid.c b/usr/src/uts/common/io/drm/drm_edid.c
new file mode 100644
index 0000000..15fe208
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_edid.c
@@ -0,0 +1,2995 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "drm.h"
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_sun_i2c.h"
+#include "drm_crtc.h"
+
+#define version_greater(edid, maj, min) \
+ (((edid)->version > (maj)) || \
+ ((edid)->version == (maj) && (edid)->revision > (min)))
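+/* e.g. version_greater(edid, 1, 3) is true for EDID 1.4 and later */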
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+
+struct detailed_mode_closure {
+ struct drm_connector *connector;
+ struct edid *edid;
+ bool preferred;
+ u32 quirks;
+ int modes;
+};
+
+#define LEVEL_DMT 0
+#define LEVEL_GTF 1
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
+
+static struct edid_quirk {
+ char vendor[4];
+ int product_id;
+ u32 quirks;
+} edid_quirk_list[] = {
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+ { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Unknown Acer */
+ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Belinea 10 15 55 */
+ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+ { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Envision Peripherals, Inc. EN-7100e */
+ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ /* Envision EN2028 */
+ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Funai Electronics PM36B */
+ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM },
+
+ /* LG Philips LCD LP154W01-A5 */
+ { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+ { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+ /* Philips 107p5 CRT */
+ { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Proview AY765C */
+ { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Samsung SyncMaster 205BW. Note: irony */
+ { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+ /* Samsung SyncMaster 22[5-6]BW */
+ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+};
+
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@75Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@85Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+};
+
+static const struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+struct minimode {
+ short w;
+ short h;
+ short r;
+ short rb;
+};
+
+static const struct minimode est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 1 - 640x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2 - 720x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 3 - 720x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 4 - 1280x720@60Hz */
+ { .vrefresh = 60, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 5 - 1920x1080i@60Hz */
+ { .vrefresh = 60, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 6 - 1440x480i@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 7 - 1440x480i@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 8 - 1440x240@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 9 - 1440x240@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 10 - 2880x480i@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 11 - 2880x480i@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 12 - 2880x240@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 13 - 2880x240@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 14 - 1440x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 15 - 1440x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 16 - 1920x1080@60Hz */
+ { .vrefresh = 60, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 17 - 720x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 18 - 720x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 19 - 1280x720@50Hz */
+ { .vrefresh = 50, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 20 - 1920x1080i@50Hz */
+ { .vrefresh = 50, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 21 - 1440x576i@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 22 - 1440x576i@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 23 - 1440x288@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 24 - 1440x288@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 25 - 2880x576i@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 26 - 2880x576i@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 27 - 2880x288@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 28 - 2880x288@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 29 - 1440x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 30 - 1440x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 31 - 1920x1080@50Hz */
+ { .vrefresh = 50, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 32 - 1920x1080@24Hz */
+ { .vrefresh = 50, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 33 - 1920x1080@25Hz */
+ { .vrefresh = 25, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 34 - 1920x1080@30Hz */
+ { .vrefresh = 30, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 35 - 2880x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 36 - 2880x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 37 - 2880x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 38 - 2880x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 39 - 1920x1080i@50Hz */
+ { .vrefresh = 50, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 40 - 1920x1080i@100Hz */
+ { .vrefresh = 100, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 41 - 1280x720@100Hz */
+ { .vrefresh = 100, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 42 - 720x576@100Hz */
+ { .vrefresh = 100, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 43 - 720x576@100Hz */
+ { .vrefresh = 100, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 44 - 1440x576i@100Hz */
+ { .vrefresh = 100, DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 45 - 1440x576i@100Hz */
+ { .vrefresh = 100, DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 46 - 1920x1080i@120Hz */
+ { .vrefresh = 120, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 47 - 1280x720@120Hz */
+ { .vrefresh = 120, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 48 - 720x480@120Hz */
+ { .vrefresh = 120, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 49 - 720x480@120Hz */
+ { .vrefresh = 120, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 50 - 1440x480i@120Hz */
+ { .vrefresh = 120, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 51 - 1440x480i@120Hz */
+ { .vrefresh = 120, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 52 - 720x576@200Hz */
+ { .vrefresh = 200, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 53 - 720x576@200Hz */
+ { .vrefresh = 200, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 54 - 1440x576i@200Hz */
+ { .vrefresh = 200, DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 55 - 1440x576i@200Hz */
+ { .vrefresh = 200, DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 56 - 720x480@240Hz */
+ { .vrefresh = 240, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 57 - 720x480@240Hz */
+ { .vrefresh = 240, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 58 - 1440x480i@240Hz */
+ { .vrefresh = 240, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 59 - 1440x480i@240Hz */
+ { .vrefresh = 240, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 60 - 1280x720@24Hz */
+ { .vrefresh = 24, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 61 - 1280x720@25Hz */
+ { .vrefresh = 25, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 62 - 1280x720@30Hz */
+ { .vrefresh = 30, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 63 - 1920x1080@120Hz */
+ { .vrefresh = 120, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 64 - 1920x1080@100Hz */
+ { .vrefresh = 100, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+};
+
+/*** DDC fetch and block validation ***/
+
+static const u8 edid_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/*
+ * Sanity check the header of the base EDID block. Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+ int i, score = 0;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ return score;
+}
+
+static int edid_fixup = 6;
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
+ */
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
+{
+ int i;
+ u8 csum = 0;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (!raw_edid) {
+ WARN_ON(1);
+ return false;
+ }
+ if (edid_fixup > 8 || edid_fixup < 0)
+ edid_fixup = 6;
+
+ if (block == 0) {
+ int score = drm_edid_header_is_valid(raw_edid);
+ if (score == 8)
+ DRM_DEBUG("edid header is perfect");
+ else if (score >= edid_fixup) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ (void) memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
+
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+ if (csum) {
+ if (print_bad_edid) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ }
+
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
+ }
+
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+
+bad:
+ if (print_bad_edid) {
+ DRM_DEBUG_KMS("Raw EDID:\n");
+ /* print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); */
+ }
+ return false;
+}
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
+ return false;
+
+ return true;
+}
+
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf : EDID data buffer to be filled
+ * \param block : 128-byte EDID block number to fetch
+ * \param len : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ unsigned char segment = block >> 1;
+ unsigned char xfers = segment ? 3 : 2;
+ int ret, retries = 5;
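+
+ /*
+ * For example, block 2 (the third 128-byte block) gives segment 1
+ * and start 0: start is an unsigned char, so 2 * 128 truncates to
+ * 0, and the E-DDC segment pointer at 0x30 selects the upper
+ * 256-byte page.
+ */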
+
+ /* The core i2c driver will automatically retry the transfer if the
+ * adapter reports EAGAIN. However, we find that bit-banging transfers
+ * are susceptible to errors under a heavily loaded machine and
+ * generate spurious NAKs and timeouts. Retrying the transfer
+ * of the individual block a few times seems to overcome this.
+ */
+ do {
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_SEGMENT_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &segment,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = (u16)len,
+ .buf = buf,
+ }
+ };
+
+ /*
+ * Avoid sending the segment address for blocks 0 and 1, so as not
+ * to upset non-compliant DDC monitors.
+ */
+ ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
+ if (ret == -ENXIO) {
+ DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
+ adapter->name);
+ break;
+ }
+ } while (ret != xfers && --retries);
+
+ return ret == xfers ? 0 : -1;
+}
+
+static bool drm_edid_is_zero(u8 *in_edid, int length)
+{
+ int i;
+ u32 *raw_edid = (u32 *)(uintptr_t)(caddr_t)in_edid;
+
+ for (i = 0; i < length / 4; i++)
+ if (*(raw_edid + i) != 0)
+ return false;
+ return true;
+}
+
+static struct edid *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ int i, j = 0;
+ u8 *block, valid_extensions = 0;
+ bool print_bad_edid = !connector->bad_edid_counter;
+
+ /* allocate enough for the base block plus all possible extensions up front */
+ if ((block = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block, 0, print_bad_edid))
+ break;
+ if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ connector->null_edid_counter++;
+ goto carp;
+ }
+ }
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return (struct edid *) block;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter,
+ block + (valid_extensions + 1) * EDID_LENGTH,
+ j, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
+ valid_extensions++;
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("%s: Ignoring invalid EDID block %d.\n",
+ drm_get_connector_name(connector), j);
+ }
+
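+ /*
+ * If any extensions were dropped, patch the base block's extension
+ * count and bump its checksum byte by the same amount, so the
+ * block still sums to zero.
+ */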
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ }
+
+ return (struct edid *) block;
+
+carp:
+ if (print_bad_edid) {
+ DRM_DEBUG_KMS("%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+ }
+ connector->bad_edid_counter++;
+
+out:
+ kfree(block, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return 1 on success
+ */
+bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = drm_do_get_edid(connector, adapter);
+
+ return edid;
+}
+
+/*** EDID parsing ***/
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
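+ * For example, mfg_id bytes { 0x4c, 0x2d } unpack to "SAM": three
+ * 5-bit letters, with 'A' encoded as 1.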
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+ char edid_vendor[3];
+
+ edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+ edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+ ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+ edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+ return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+ struct edid_quirk *quirk;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+
+ if (edid_vendor(edid, quirk->vendor) &&
+ (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ return quirk->quirks;
+ }
+
+ return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+ u32 quirks)
+{
+ struct drm_display_mode *t, *cur_mode, *preferred_mode;
+ int target_refresh = 0;
+
+ if (list_empty(&connector->probed_modes))
+ return;
+
+ if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+ target_refresh = 60;
+ if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+ target_refresh = 75;
+
+ preferred_mode = list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ list_for_each_entry_safe(cur_mode, t, struct drm_display_mode, &connector->probed_modes, head) {
+ cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+ if (cur_mode == preferred_mode)
+ continue;
+
+ /* Largest mode is preferred */
+ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+ preferred_mode = cur_mode;
+
+ /* At a given size, try to get closest to target refresh */
+ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+ MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+ MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+ preferred_mode = cur_mode;
+ }
+ }
+
+ preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
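+/*
+ * CVT reduced-blanking timings have a fixed signature: a 160-pixel
+ * horizontal blank, hsync ending 80 pixels past active (48 front
+ * porch plus a 32-pixel sync), and a 3-line vertical front porch,
+ * which is what the checks below test for.
+ */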
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hsize != ptr->hdisplay)
+ continue;
+ if (vsize != ptr->vdisplay)
+ continue;
+ if (fresh != drm_mode_vrefresh(ptr))
+ continue;
+ if (rb != mode_is_rb(ptr))
+ continue;
+
+ return drm_mode_duplicate(dev, ptr);
+ }
+
+ return NULL;
+}
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ int i, n = 0;
+ u8 d = ext[0x02];
+ u8 *det_base = ext + d;
+
+ n = (127 - d) / 18;
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ unsigned int i, n = min((int)ext[0x02], 6);
+ u8 *det_base = ext + 5;
+
+ if (ext[0x01] != 1)
+ return; /* unknown version */
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ for (i = 1; i <= raw_edid[0x7e]; i++) {
+ u8 *ext = raw_edid + (i * EDID_LENGTH);
+ switch (*ext) {
+ case CEA_EXT:
+ cea_for_each_detailed_block(ext, cb, closure);
+ break;
+ case VTB_EXT:
+ vtb_for_each_detailed_block(ext, cb, closure);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
+
+/*
+ * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
+ * monitors fill with ascii space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+ return (a == 0x00 && b == 0x00) ||
+ (a == 0x01 && b == 0x01) ||
+ (a == 0x20 && b == 0x20);
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector the resulting mode will be probed against
+ * @edid: EDID block the timing came from
+ * @t: standard timing params
+ * @revision: EDID revision, used to pick the aspect-ratio rules
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ */
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
+ int hsize, vsize;
+ int vrefresh_rate;
+ unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+ >> EDID_TIMING_ASPECT_SHIFT;
+ unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+ >> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
+
+ if (bad_std_timing(t->hsize, t->vfreq_aspect))
+ return NULL;
+
+ /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+ hsize = t->hsize * 8 + 248;
+ /* vrefresh_rate = vfreq + 60 */
+ vrefresh_rate = vfreq + 60;
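+ /*
+ * For example, a raw hsize byte of 0x81 (129) gives
+ * 129 * 8 + 248 = 1280, and a raw vfreq of 0 means 60Hz.
+ */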
+ /* the vdisplay is calculated based on the aspect ratio */
+ if (aspect_ratio == 0) {
+ if (revision < 3)
+ vsize = hsize;
+ else
+ vsize = (hsize * 10) / 16;
+ } else if (aspect_ratio == 1)
+ vsize = (hsize * 3) / 4;
+ else if (aspect_ratio == 2)
+ vsize = (hsize * 4) / 5;
+ else
+ vsize = (hsize * 9) / 16;
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, struct drm_display_mode, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
+ false);
+ if (mode != NULL) {
+ mode->hdisplay = 1366;
+ mode->hsync_start = mode->hsync_start - 1;
+ mode->hsync_end = mode->hsync_end - 1;
+ }
+ return mode;
+ }
+
+ /* check whether it can be found in default mode table */
+ if (drm_monitor_supports_rb(edid)) {
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+ true);
+ if (mode)
+ return mode;
+ }
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
+ if (mode)
+ return mode;
+
+ /* okay, generate it */
+ switch (timing_level) {
+ case LEVEL_DMT:
+ break;
+ case LEVEL_GTF:
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (!mode)
+ return NULL;
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ drm_mode_destroy(dev, mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
+ case LEVEL_CVT:
+ mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+ false);
+ break;
+ }
+ return mode;
+}
+
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+ struct detailed_pixel_timing *pt)
+{
+ int i;
+ static const struct {
+ int w, h;
+ } cea_interlaced[] = {
+ { 1920, 1080 },
+ { 720, 480 },
+ { 1440, 480 },
+ { 2880, 480 },
+ { 720, 576 },
+ { 1440, 576 },
+ { 2880, 576 },
+ };
+
+ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
+ if ((mode->hdisplay == cea_interlaced[i].w) &&
+ (mode->vdisplay == cea_interlaced[i].h / 2)) {
+ mode->vdisplay *= 2;
+ mode->vsync_start *= 2;
+ mode->vsync_end *= 2;
+ mode->vtotal *= 2;
+ mode->vtotal |= 1;
+ }
+ }
+
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ struct edid *edid,
+ struct detailed_timing *timing,
+ u32 quirks)
+{
+ struct drm_display_mode *mode;
+ struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+ unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+ unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+ unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+ unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
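+ /*
+ * The descriptor packs 12-bit h/v values as a low byte plus a high
+ * nibble; e.g. hactive_lo 0x80 under a high nibble of 0x7 gives
+ * ((0x70 & 0xf0) << 4) | 0x80 = 0x780 = 1920.
+ */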
+
+ /* ignore tiny modes */
+ if (hactive < 64 || vactive < 64)
+ return NULL;
+
+ if (pt->misc & DRM_EDID_PT_STEREO) {
+ DRM_ERROR("stereo mode not supported\n");
+ return NULL;
+ }
+ if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+ DRM_ERROR("integrated sync not supported\n");
+ }
+
+ /* a zero hsync or vsync pulse width is invalid */
+ if (!hsync_pulse_width || !vsync_pulse_width) {
+ DRM_DEBUG_KMS("Incorrect Detailed timing. "
+ "Wrong Hsync/Vsync pulse width\n");
+ return NULL;
+ }
+
+ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
+ goto set_size;
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ timing->pixel_clock = cpu_to_le16(1088);
+
+ mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
+
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hsync_offset;
+ mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+ mode->htotal = mode->hdisplay + hblank;
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vsync_offset;
+ mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+ /* Some EDIDs have bogus h/vtotal values */
+ if (mode->hsync_end > mode->htotal)
+ mode->htotal = mode->hsync_end + 1;
+ if (mode->vsync_end > mode->vtotal)
+ mode->vtotal = mode->vsync_end + 1;
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+ }
+
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+set_size:
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+ if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ mode->width_mm *= 10;
+ mode->height_mm *= 10;
+ }
+
+ if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ mode->width_mm = edid->width_cm * 10;
+ mode->height_mm = edid->height_cm * 10;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+static bool
+mode_in_hsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
+ hsync = drm_mode_hsync(mode);
+
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
+ return false;
+
+ if (!mode_in_vsync_range(mode, edid, t))
+ return false;
+
+ if ((max_clock = range_pixel_clock(edid, t)))
+ if (mode->clock > max_clock)
+ return false;
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
+
+ return true;
+}
+
+static bool valid_inferred_mode(const struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+ bool ok = false;
+
+ list_for_each_entry(m, struct drm_display_mode, &connector->probed_modes, head) {
+ if (mode->hdisplay == m->hdisplay &&
+ mode->vdisplay == m->vdisplay &&
+ drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+ return false; /* duplicated */
+ if (mode->hdisplay <= m->hdisplay &&
+ mode->vdisplay <= m->vdisplay)
+ ok = true;
+ }
+ return ok;
+}
+
+static int
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ valid_inferred_mode(connector, drm_dmt_modes + i)) {
+ newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+/* fix up 1366x768 mode from 1368x768;
+ * GTF/CVT can't express a 1366-pixel width, which isn't divisible by 8
+ */
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
+}
+
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ bool rb = drm_monitor_supports_rb(edid);
+
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static void
+do_inferred_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ return;
+
+ closure->modes += drm_dmt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+
+ if (!version_greater(closure->edid, 1, 1))
+ return; /* GTF not defined yet */
+
+ switch (range->flags) {
+ case 0x02: /* secondary gtf, XXX could do more */
+ case 0x00: /* default gtf */
+ closure->modes += drm_gtf_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x04: /* cvt, only in 1.4+ */
+ if (!version_greater(closure->edid, 1, 3))
+ break;
+
+ closure->modes += drm_cvt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x01: /* just the ranges, no formula */
+ default:
+ break;
+ }
+}
+
+static int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+ &closure);
+
+ return closure.modes;
+}
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
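+
+ /*
+ * Six bitmap bytes follow the 5-byte descriptor header; bit 7 of
+ * the first byte maps to est3_modes[0], so e.g. bit 6 of est[0]
+ * (i == 0, j == 6) selects est3_modes[1].
+ */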
+
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j > 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= ARRAY_SIZE(est3_modes))
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r,
+ est3_modes[m].rb);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+static void
+do_established_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_EST_TIMINGS)
+ closure->modes += drm_est3_modes(closure->connector, timing);
+}
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above). Tease them out and add them to the global modes list.
+ */
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ unsigned long est_bits = edid->established_timings.t1 |
+ (edid->established_timings.t2 << 8) |
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+ if (est_bits & (1<<i)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid,
+ do_established_modes, &closure);
+
+ return modes + closure.modes;
+}
+
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct drm_connector *connector = closure->connector;
+ struct edid *edid = closure->edid;
+
+ if (data->type == EDID_DETAIL_STD_MODES) {
+ int i;
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
+ }
+ }
+ }
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
+ */
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+ &closure);
+
+ /* XXX should also look for standard codes in VTB blocks */
+
+ return modes + closure.modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
+
+ for (i = 0; i < 4; i++) {
+ int width, height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
+
+ if (!memcmp(cvt->code, empty, 3))
+ continue;
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
+ case 0x00:
+ width = height * 4 / 3;
+ break;
+ case 0x04:
+ width = height * 16 / 9;
+ break;
+ case 0x08:
+ width = height * 16 / 10;
+ break;
+ case 0x0c:
+ width = height * 15 / 9;
+ break;
+ }
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_CVT_3BYTE)
+ closure->modes += drm_cvt_modes(closure->connector, timing);
+}
+
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 2))
+ drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
+
+ /* XXX should also look for CVT codes in VTB blocks */
+
+ return closure.modes;
+}
+
+static void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_mode *newmode;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
+
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = 0;
+ }
+}
+
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ */
+static int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ u32 quirks)
+{
+ struct detailed_mode_closure closure = {
+ connector,
+ edid,
+ 1,
+ quirks,
+ 0
+ };
+
+ if (closure.preferred && !version_greater(edid, 1, 3))
+ closure.preferred =
+ (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+ drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+ return closure.modes;
+}
+
+#define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK 0x01
+#define VIDEO_BLOCK 0x02
+#define VENDOR_BLOCK 0x03
+#define SPEAKER_BLOCK 0x04
+#define VIDEO_CAPABILITY_BLOCK 0x07
+#define EDID_BASIC_AUDIO (1 << 6)
+#define EDID_CEA_YCRCB444 (1 << 5)
+#define EDID_CEA_YCRCB422 (1 << 4)
+#define EDID_CEA_VCDB_QS (1 << 6)
+
+/**
+ * drm_find_cea_extension - search EDID for a CEA extension block
+ * @edid: EDID block to scan
+ */
+u8 *drm_find_cea_extension(struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+/*
+ * Calculate the alternate clock for the CEA mode
+ * (60Hz vs. 59.94Hz etc.)
+ */
+static unsigned int
+cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
+{
+ unsigned int clock = cea_mode->clock;
+
+ if (cea_mode->vrefresh % 6 != 0)
+ return clock;
+
+ /*
+ * edid_cea_modes contains the 59.94Hz
+ * variant for 240 and 480 line modes,
+ * and the 60Hz variant otherwise.
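+ * For example, VIC 16 (1920x1080@60Hz, clock 148500) becomes
+ * DIV_ROUND_UP(148500 * 1000, 1001) = 148352kHz for the 59.94Hz
+ * variant.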
+ */
+ if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
+ clock = clock * 1001 / 1000;
+ else
+ clock = DIV_ROUND_UP(clock * 1000, 1001);
+
+ return clock;
+}
+
+/**
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
+ *
+ * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
+ */
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
+{
+ u8 mode;
+
+ if (!to_match->clock)
+ return 0;
+
+ for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
+ const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+ unsigned int clock1, clock2;
+
+ /* Check both 60Hz and 59.94Hz */
+ clock1 = cea_mode->clock;
+ clock2 = cea_mode_alternate_clock(cea_mode);
+
+ if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+ KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+ drm_mode_equal_no_clocks(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+
+static int
+add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *tmp;
+ struct list_head list;
+ int modes = 0;
+
+ INIT_LIST_HEAD(&list);
+
+ /* Don't add CEA modes if the CEA extension block is missing */
+ if (!drm_find_cea_extension(edid))
+ return 0;
+
+ /*
+ * Go through all probed modes and create a new mode
+ * with the alternate clock for certain CEA modes.
+ */
+ list_for_each_entry(mode, struct drm_display_mode, &connector->probed_modes, head) {
+ const struct drm_display_mode *cea_mode;
+ struct drm_display_mode *newmode;
+ u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
+ unsigned int clock1, clock2;
+
+ if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
+ continue;
+
+ cea_mode = &edid_cea_modes[cea_mode_idx];
+
+ clock1 = cea_mode->clock;
+ clock2 = cea_mode_alternate_clock(cea_mode);
+
+ if (clock1 == clock2)
+ continue;
+
+ if (mode->clock != clock1 && mode->clock != clock2)
+ continue;
+
+ newmode = drm_mode_duplicate(dev, cea_mode);
+ if (!newmode)
+ continue;
+
+ /*
+ * The current mode could be either variant. Make
+ * sure to pick the "other" clock for the new mode.
+ */
+ if (mode->clock != clock1)
+ newmode->clock = clock1;
+ else
+ newmode->clock = clock2;
+
+ list_add_tail(&newmode->head, &list, (caddr_t)newmode);
+ }
+
+ list_for_each_entry_safe(mode, tmp, struct drm_display_mode, &list, head) {
+ list_del(&mode->head);
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static int
+do_cea_modes(struct drm_connector *connector, u8 *db, u8 len)
+{
+ struct drm_device *dev = connector->dev;
+ u8 *mode, cea_mode;
+ int modes = 0;
+
+ for (mode = db; mode < db + len; mode++) {
+ cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+ if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev,
+ &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ newmode->vrefresh = 0;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
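+/*
+ * Each CEA data block starts with a single header byte: the top
+ * three bits are the tag and the low five bits the payload length.
+ * For example, a header byte of 0x42 is a Video Data Block (tag 2)
+ * with a 2-byte payload.
+ */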
+static int
+cea_db_payload_len(const u8 *db)
+{
+ return db[0] & 0x1f;
+}
+
+static int
+cea_db_tag(const u8 *db)
+{
+ return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+ return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+ /*
+ * Data blocks start at byte 4 of a CEA extension block; byte 2
+ * holds the offset of the first detailed timing descriptor, which
+ * is where the data blocks end.
+ */
+ *start = 4;
+ *end = cea[2];
+ if (*end == 0)
+ *end = 127;
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ return 0;
+}
+
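+/*
+ * Iterate over the CEA data blocks between offsets start and end;
+ * each step skips the 1-byte header plus its payload, and the
+ * bounds check keeps a corrupt length byte from running past the
+ * end of the block.
+ */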
+#define for_each_cea_db(cea, i, start, end) \
+ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ u8 *cea = drm_find_cea_extension(edid);
+ u8 *db, dbl;
+ int modes = 0;
+
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
+ return 0;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ if (cea_db_tag(db) == VIDEO_BLOCK)
+ modes += do_cea_modes(connector, db + 1, dbl);
+ }
+ }
+
+ return modes;
+}
+
+static void
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
+{
+ u8 len = cea_db_payload_len(db);
+
+ if (len >= 6) {
+ connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+ connector->dvi_dual = db[6] & 1;
+ }
+ if (len >= 7)
+ connector->max_tmds_clock = db[7] * 5;
+ if (len >= 8) {
+ connector->latency_present[0] = db[8] >> 7;
+ connector->latency_present[1] = (db[8] >> 6) & 1;
+ }
+ if (len >= 9)
+ connector->video_latency[0] = db[9];
+ if (len >= 10)
+ connector->audio_latency[0] = db[10];
+ if (len >= 11)
+ connector->video_latency[1] = db[11];
+ if (len >= 12)
+ connector->audio_latency[1] = db[12];
+
+ DRM_LOG_KMS("HDMI: DVI dual %d, "
+ "max TMDS clock %d, "
+ "latency present %d %d, "
+ "video latency %d %d, "
+ "audio latency %d %d\n",
+ connector->dvi_dual,
+ connector->max_tmds_clock,
+ (int) connector->latency_present[0],
+ (int) connector->latency_present[1],
+ connector->video_latency[0],
+ connector->video_latency[1],
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
+}
+
+static void
+monitor_name(struct detailed_timing *t, void *data)
+{
+ if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+ *(u8 **)data = t->data.other_data.data.str.str;
+}
+
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 5)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IDENTIFIER;
+}
+
+/**
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+ * Some ELD fields are left to the graphics driver caller:
+ * - Conn_Type
+ * - HDCP
+ * - Port_ID
+ */
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+ uint8_t *eld = connector->eld;
+ u8 *cea;
+ u8 *name;
+ u8 *db;
+ int sad_count = 0;
+ int mnl;
+ int dbl;
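+
+ /*
+ * Baseline ELD layout as filled in below: byte 4 holds the CEA
+ * version and monitor name length, the upper nibble of byte 5 the
+ * SAD count, byte 7 the speaker allocation, bytes 16-19 the EDID
+ * vendor/product IDs, and the monitor name followed by the SADs
+ * starts at byte 20.
+ */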
+
+ (void) memset(eld, 0, sizeof(connector->eld));
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+ return;
+ }
+
+ name = NULL;
+ drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+ for (mnl = 0; name && mnl < 13; mnl++) {
+ if (name[mnl] == 0x0a)
+ break;
+ eld[20 + mnl] = name[mnl];
+ }
+ eld[4] = (cea[1] << 5) | mnl;
+ DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+ eld[0] = 2 << 3; /* ELD version: 2 */
+
+ eld[16] = edid->mfg_id[0];
+ eld[17] = edid->mfg_id[1];
+ eld[18] = edid->prod_code[0];
+ eld[19] = edid->prod_code[1];
+
+ if (cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ start = 0;
+ end = 0;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ switch (cea_db_tag(db)) {
+ case AUDIO_BLOCK:
+ /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ if (dbl >= 1)
+ (void) memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK:
+ /* Speaker Allocation Data Block */
+ if (dbl >= 1)
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (cea_db_is_hdmi_vsdb(db))
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ eld[5] |= sad_count << 4;
+ eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+
+/**
+ * drm_edid_to_sad - extracts SADs from EDID
+ * @edid: EDID to parse
+ * @sads: pointer that will be set to the extracted SADs
+ *
+ * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
+ * Note: returned pointer needs to be kfreed
+ *
+ * Return number of found SADs or negative number on error.
+ */
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
+{
+ int count = 0;
+ int i, start, end, dbl;
+ u8 *cea;
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+ return -ENOENT;
+ }
+
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+ return -ENOTSUP;
+ }
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+ return -EPROTO;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ u8 *db = &cea[i];
+
+ if (cea_db_tag(db) == AUDIO_BLOCK) {
+ int j;
+ dbl = cea_db_payload_len(db);
+
+ count = dbl / 3; /* SAD is 3B */
+ *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
+ if (!*sads)
+ return -ENOMEM;
+ for (j = 0; j < count; j++) {
+ u8 *sad = &db[1 + j * 3];
+
+ (*sads)[j].format = (sad[0] & 0x78) >> 3;
+ (*sads)[j].channels = sad[0] & 0x7;
+ (*sads)[j].freq = sad[1] & 0x7F;
+ (*sads)[j].byte2 = sad[2];
+ }
+ break;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ */
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ int a, v;
+
+ if (!connector->latency_present[0])
+ return 0;
+ if (!connector->latency_present[1])
+ i = 0;
+
+ a = connector->audio_latency[i];
+ v = connector->video_latency[i];
+
+ /*
+ * HDMI/DP sink doesn't support audio or video?
+ */
+ if (a == 255 || v == 255)
+ return 0;
+
+ /*
+ * Convert raw EDID values to milliseconds.
+ * Treat unknown latency as 0ms.
+ */
+ if (a)
+ a = min(2 * (a - 1), 500);
+ if (v)
+ v = min(2 * (v - 1), 500);
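+ /* e.g. a raw latency value of 6 means (6 - 1) * 2 = 10ms */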
+
+ return max(v - a, 0);
+}
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder just changed display mode
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ */
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ /* LINTED E_FUNC_ARG_UNUSED */
+ struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder && connector->eld[0])
+ return connector;
+
+ return NULL;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether a monitor is HDMI.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
+
+ /*
+ * Because HDMI identifier is in Vendor Specific Block,
+ * search it from all data blocks of CEA extension.
+ */
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ * @edid: EDID block to scan
+ *
+ * Monitor should have CEA extension block.
+ * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
+ * audio' only. If there is any audio extension block and supported
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in EDID.
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, j;
+ bool has_audio = false;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ goto end;
+
+ has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+ if (has_audio) {
+ DRM_DEBUG_KMS("Monitor has basic audio support\n");
+ goto end;
+ }
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ goto end;
+
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
+ has_audio = true;
+ for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
+ DRM_DEBUG_KMS("CEA audio format %d\n",
+ (edid_ext[i + j] >> 3) & 0xf);
+ goto end;
+ }
+ }
+end:
+ return has_audio;
+}
+
+/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, start, end;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start, &end))
+ return false;
+
+ for_each_cea_db(edid_ext, i, start, end) {
+ if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+ cea_db_payload_len(&edid_ext[i]) == 2) {
+ DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+ return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+ }
+ }
+
+ return false;
+}
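+
+/*
+ * Illustrative sketch (field names hypothetical, not from this import):
+ * when this returns true the driver may choose the RGB range and report
+ * it in the AVI infoframe, e.g.
+ *
+ *	if (drm_rgb_quant_range_selectable(edid))
+ *		frame.quant_range = want_full ? QUANT_FULL : QUANT_LIMITED;
+ */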
+
+/**
+ * drm_add_display_info - pull display info out if present
+ * @edid: EDID data
+ * @info: display info (attached to connector)
+ *
+ * Grab any available display info and stuff it into the drm_display_info
+ * structure that's part of the connector. Useful for tracking bpp and
+ * color spaces.
+ */
+static void drm_add_display_info(struct edid *edid,
+ struct drm_display_info *info)
+{
+ u8 *edid_ext;
+
+ info->width_mm = edid->width_cm * 10;
+ info->height_mm = edid->height_cm * 10;
+
+ /* driver figures it out in this case */
+ info->bpc = 0;
+ info->color_formats = 0;
+
+ if (edid->revision < 3)
+ return;
+
+ if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return;
+
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (edid_ext) {
+ info->cea_rev = edid_ext[1];
+
+ /* The existence of a CEA block should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ }
+
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+
+ switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+ case DRM_EDID_DIGITAL_DEPTH_6:
+ info->bpc = 6;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_8:
+ info->bpc = 8;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_10:
+ info->bpc = 10;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_12:
+ info->bpc = 12;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_14:
+ info->bpc = 14;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_16:
+ info->bpc = 16;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+ default:
+ info->bpc = 0;
+ break;
+ }
+
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+}
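+
+/*
+ * Worked example for the depth decode above (illustrative): an EDID 1.4
+ * digital input byte of 0xA5 has (0xA5 & DRM_EDID_DIGITAL_DEPTH_MASK)
+ * equal to 0x20, i.e. DRM_EDID_DIGITAL_DEPTH_8, so info->bpc = 8.
+ * This assumes the mask is 0x70, as defined in drm_edid.h.
+ */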
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int num_modes = 0;
+ u32 quirks;
+
+ if (edid == NULL) {
+ return 0;
+ }
+ if (!drm_edid_is_valid(edid)) {
+ DRM_ERROR("%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ return 0;
+ }
+
+ quirks = edid_get_quirks(edid);
+
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We get this pretty much right.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
+ num_modes += add_detailed_modes(connector, edid, quirks);
+ num_modes += add_cvt_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
+ num_modes += add_alternate_cea_modes(connector, edid);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+
+ drm_add_display_info(edid, &connector->display_info);
+
+ return num_modes;
+}
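+
+/*
+ * Typical ->get_modes flow (a sketch, not part of the original import;
+ * EDID lifetime handling is driver-specific):
+ *
+ *	struct edid *edid = drm_get_edid(connector, adapter);
+ *	int count = 0;
+ *	if (edid != NULL)
+ *		count = drm_add_edid_modes(connector, edid);
+ *	return count;
+ */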
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. A mode is only
+ * added when its hdisplay/vdisplay do not exceed the given limits.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay)
+{
+ int i, count, num_modes = 0;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+ if (hdisplay < 0)
+ hdisplay = 0;
+ if (vdisplay < 0)
+ vdisplay = 0;
+
+ for (i = 0; i < count; i++) {
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hdisplay && vdisplay) {
+ /*
+ * Only when two are valid, they will be used to check
+ * whether the mode should be added to the mode list of
+ * the connector.
+ */
+ if (ptr->hdisplay > hdisplay ||
+ ptr->vdisplay > vdisplay)
+ continue;
+ }
+ if (drm_mode_vrefresh(ptr) > 61)
+ continue;
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
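+
+/*
+ * Example (illustrative): a driver with a connector that provides no
+ * EDID can fall back to the DMT list capped at XGA:
+ *
+ *	num = drm_add_modes_noedid(connector, 1024, 768);
+ */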
+
diff --git a/usr/src/uts/common/io/drm/drm_fb_helper.c b/usr/src/uts/common/io/drm/drm_fb_helper.c
new file mode 100644
index 0000000..a8078bf
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_fb_helper.c
@@ -0,0 +1,958 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+
+struct list_head kernel_fb_helper_list;
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ *
+ * Initialization is done as a three-step process with drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+ * Drivers with fancier requirements than the default behaviour can override the
+ * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code of updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c the modeset
+ * code provides a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
+ */
+
+/*
+ * simple single crtc case helper function
+ *
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper. This is a separate step to allow drivers to freely assign
+ * connectors to the fbdev, e.g. if some are reserved for special purposes or
+ * not adequate to be used for the fbcon.
+ *
+ * Since this is part of the initial setup before the fbdev is published, no
+ * locking is required.
+ */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
+ return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i], sizeof(struct drm_fb_helper_connector));
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
+}
+
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct gfxp_bm_fb_info fb_info;
+ int i;
+
+ gfxp_bm_getfb_info(dev->vgatext->private, &fb_info);
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ struct drm_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ cmdline_mode->specified = true;
+ cmdline_mode->xres = fb_info.xres;
+ cmdline_mode->yres = fb_info.yres;
+ cmdline_mode->refresh_specified = true;
+ cmdline_mode->refresh = 60;
+ cmdline_mode->bpp_specified = true;
+ cmdline_mode->bpp = fb_info.depth;
+ }
+ return 0;
+}
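+
+/*
+ * Example (illustrative): with a console framebuffer reporting
+ * xres = 1280, yres = 1024 and depth = 32, every fb helper connector ends
+ * up with a forced command-line mode equivalent to "1280x1024-32@60",
+ * which drm_pick_cmdline_mode() below resolves against the probed modes.
+ */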
+
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
+ if (helper->funcs->gamma_get == NULL)
+ return;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++)
+ helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+ uint16_t *r_base, *g_base, *b_base;
+
+ if (crtc->funcs->gamma_set == NULL)
+ return;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *c;
+
+ list_for_each_entry(c, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+ return c->fb;
+ }
+
+ return NULL;
+}
+
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ bool error = false;
+ int i;
+
+ if (fb_helper == NULL)
+ return error;
+
+ dev = fb_helper->dev;
+
+ list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head)
+ drm_plane_force_disable(plane);
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ struct drm_crtc *crtc = mode_set->crtc;
+ int ret;
+
+ if (crtc->funcs->cursor_set) {
+ ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
+ if (ret)
+ error = true;
+ }
+
+ ret = drm_mode_set_config_internal(mode_set);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+
+bool drm_fb_helper_force_kernel_mode(void)
+{
+ bool ret, error = false;
+ struct drm_fb_helper *helper;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, struct drm_fb_helper, &kernel_fb_helper_list, kernel_fb_list) {
+ if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
+ ret = drm_fb_helper_restore_fbdev_mode(helper);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound++;
+ if (crtc->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound)
+ return false;
+ return true;
+}
+
+/* maximum connectors per crtc in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+{
+ int i;
+ struct drm_device *dev = helper->dev;
+
+ kfree(helper->connector_info, dev->mode_config.num_connector * sizeof(struct drm_fb_helper_connector *));
+ for (i = 0; i < helper->crtc_count; i++) {
+ kfree(helper->crtc_info[i].mode_set.connectors, INTELFB_CONN_LIMIT * sizeof(struct drm_connector *));
+ if (helper->crtc_info[i].mode_set.mode)
+ drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ }
+ kfree(helper->crtc_info, helper->crtc_count * sizeof(struct drm_fb_helper_crtc));
+}
+
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
+{
+ struct drm_crtc *crtc;
+ int i;
+
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&kernel_fb_helper_list);
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
+ return -ENOMEM;
+
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+ if (!fb_helper->connector_info) {
+ kfree(fb_helper->crtc_info, sizeof(struct drm_fb_helper_crtc));
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
+
+ for (i = 0; i < crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.connectors =
+ kcalloc(max_conn_count,
+ sizeof(struct drm_connector *),
+ GFP_KERNEL);
+
+ if (!fb_helper->crtc_info[i].mode_set.connectors)
+ goto out_free;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
+ }
+
+ i = 0;
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
+ i++;
+ }
+
+ return 0;
+out_free:
+ drm_fb_helper_crtc_free(fb_helper);
+ return -ENOMEM;
+}
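+
+/*
+ * Typical driver initialization sequence (a sketch under assumed driver
+ * names; error handling omitted), matching the three steps described in
+ * the DOC comment above:
+ *
+ *	ret = drm_fb_helper_init(dev, &priv->fb_helper, num_crtc, 4);
+ *	if (ret == 0) {
+ *		(void) drm_fb_helper_single_add_all_connectors(&priv->fb_helper);
+ *		(void) drm_fb_helper_initial_config(&priv->fb_helper, 32);
+ *	}
+ */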
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+ DRM_INFO("drm: unregistered panic notifier");
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+
+}
+
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
+{
+ int ret = 0;
+ int crtc_count = 0;
+ int i;
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ (void) memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
+
+ /* if driver picks 8 or 16 by default use that
+ for both depth/bpp */
+ if (preferred_bpp != sizes.surface_bpp)
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+
+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+ struct drm_cmdline_mode *cmdline_mode;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+
+ if (cmdline_mode->bpp_specified) {
+ switch (cmdline_mode->bpp) {
+ case 8:
+ sizes.surface_depth = sizes.surface_bpp = 8;
+ break;
+ case 15:
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
+ break;
+ case 16:
+ sizes.surface_depth = sizes.surface_bpp = 16;
+ break;
+ case 24:
+ sizes.surface_depth = sizes.surface_bpp = 24;
+ break;
+ case 32:
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ break;
+ }
+ break;
+ }
+ }
+
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
+ crtc_count++;
+ }
+ }
+
+ if (crtc_count == 0 || sizes.fb_width == (unsigned)-1 || sizes.fb_height == (unsigned)-1) {
+ /* hmm everyone went away - assume VGA cable just fell out
+ and will come back later. */
+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
+ }
+
+ /* push down into drivers */
+ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
+ * events, but at init time drm_setup_crtcs needs to be called before
+ * the fb is allocated (since we need to figure out the desired size of
+ * the fb before we can allocate it ...). Hence we need to fix things up
+ * here again.
+ */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+
+
+ /* Switch back to kernel console on panic */
+ /* multi card linked list maybe */
+ if (list_empty(&kernel_fb_helper_list)) {
+ DRM_INFO("registered panic notifier");
+ }
+
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list, (caddr_t)fb_helper);
+
+ return 0;
+}
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, struct drm_display_mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ /* LINTED E_FUNC_ARG_UNUSED */
+ int width, int height)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes
+ * we have gotten so far, if not add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, struct drm_display_mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+ cmdline_mode);
+ if (mode)
+ list_add(&mode->head, &fb_helper_conn->connector->modes,
+ (caddr_t)mode);
+ return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict)
+ enable = connector->status == connector_status_connected;
+ else
+ enable = connector->status != connector_status_disconnected;
+
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line or if nothing common pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, struct drm_display_mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+ /* go for the command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], struct drm_display_mode, &fb_helper_conn->connector->modes, head)
+ /* LINTED */
+ { if (1) break; }
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
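+
+/*
+ * drm_pick_crtcs() below recursively tries every CRTC assignment for
+ * connector n and the connectors after it, scoring each complete
+ * assignment (connected +1, cmdline mode +1, preferred mode +1, on top
+ * of a base score of 1) and keeping the best. An illustrative
+ * two-connector case: a connected panel with a preferred mode scores 3,
+ * a disconnected output with no mode contributes 0, so the search
+ * settles on the panel's CRTC first.
+ */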
+
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int num_connector = dev->mode_config.num_connector;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ ASSERT(n <= num_connector);
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kzalloc(num_connector * sizeof(struct drm_fb_helper_crtc *),
+ GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
+ continue;
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ (void) memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_score = score;
+ (void) memcpy(best_crtcs, crtcs,
+ num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ kfree(crtcs, (num_connector * sizeof(struct drm_fb_helper_crtc *)));
+ return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, num_connector = dev->mode_config.num_connector;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ if ((crtcs = kcalloc(num_connector,
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL)) == NULL) {
+ DRM_ERROR("Memory allocation failed for crtcs\n");
+ return;
+ }
+ if ((modes = kcalloc(num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL)) == NULL) {
+ DRM_ERROR("Memory allocation failed for modes\n");
+ goto errout1;
+ }
+ if ((enabled = kcalloc(num_connector,
+ sizeof(bool), GFP_KERNEL)) == NULL) {
+ DRM_ERROR("Memory allocation failed for enabled\n");
+ goto errout2;
+ }
+
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ if (!(fb_helper->funcs->initial_config &&
+ fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+ enabled, width, height))) {
+ (void) memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
+ (void) memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+
+ if (!drm_target_cloned(fb_helper,
+ modes, enabled, width, height) &&
+ !drm_target_preferred(fb_helper,
+ modes, enabled, width, height))
+ DRM_ERROR("Unable to find initial modes\n");
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
+ width, height);
+
+ (void) drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+ }
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ modeset->fb = NULL;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ modeset->fb = fb_helper->fb;
+ }
+ }
+
+ /* Clear out any old modes if there are no more connected outputs. */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ if (modeset->num_connectors == 0) {
+ BUG_ON(modeset->fb);
+ BUG_ON(modeset->num_connectors);
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = NULL;
+ }
+ }
+
+ kfree(enabled, num_connector * sizeof(bool));
+errout2:
+ kfree(modes, num_connector * sizeof(struct drm_display_mode *));
+errout1:
+ kfree(crtcs, num_connector * sizeof(struct drm_fb_helper_crtc *));
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * Note that this also registers the fbdev and so allows userspace to call into
+ * the driver through the fbdev interfaces.
+ *
+ * This function will call down into the ->fb_probe callback to let
+ * the driver allocate and initialize the fbdev info structure and the drm
+ * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
+ * values for the fbdev info structure.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ (void) drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0) {
+ DRM_INFO("No connectors reported connected with modes");
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+
+
+/**
+ * drm_fb_helper_hotplug_event - respond to a hotplug notification by
+ * probing all the outputs attached to the fb
+ * @fb_helper: the drm_fb_helper
+ *
+ * Scan the connectors attached to the fb_helper and try to put together a
+ * setup after notification of a change in output configuration.
+ *
+ * Called at runtime, takes the mode config locks to be able to check/change the
+ * modeset configuration. Must be run from process context (which usually means
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+ * RETURNS:
+ * 0 on success and a non-zero error code otherwise.
+ */
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ u32 max_width, max_height, bpp_sel;
+
+ if (!fb_helper->fb)
+ return 0;
+
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ fb_helper->delayed_hotplug = true;
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ return 0;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ (void) drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+
+ drm_modeset_lock_all(dev);
+ drm_setup_crtcs(fb_helper);
+ drm_modeset_unlock_all(dev);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+
+int drm_gfxp_setmode(int mode)
+{
+ bool ret = 0;
+ if (mode == 0) {
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
+ }
+ if (mode == 1)
+ DRM_DEBUG_KMS("do nothing in entervt");
+ return ret;
+}
+
+struct gfxp_blt_ops drm_gfxp_ops = {
+ NULL, /* blt */
+ NULL, /* copy */
+ NULL, /* clear */
+ drm_gfxp_setmode, /* setmode */
+};
+
+void drm_register_fbops(struct drm_device *dev)
+{
+ gfxp_bm_register_fbops(dev->vgatext->private, &drm_gfxp_ops);
+}
+
+int drm_getfb_size(struct drm_device *dev)
+{
+ struct gfxp_bm_fb_info fb_info;
+ int size, pitch;
+ gfxp_bm_getfb_info(dev->vgatext->private, &fb_info);
+ pitch = ALIGN(fb_info.xres * ((fb_info.depth + 7) / 8), 64);
+ size = ALIGN(pitch * fb_info.yres, PAGE_SIZE);
+ return size;
+}
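+
+/*
+ * Worked example for the size computation above (illustrative): for a
+ * 1024x768 console at depth 32,
+ *
+ *	pitch = ALIGN(1024 * ((32 + 7) / 8), 64) = 4096
+ *	size  = ALIGN(4096 * 768, PAGE_SIZE)     = 3145728 (3 MB)
+ */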
diff --git a/usr/src/uts/common/io/drm/drm_fops.c b/usr/src/uts/common/io/drm/drm_fops.c
index da61e4d..58d42f6 100644
--- a/usr/src/uts/common/io/drm/drm_fops.c
+++ b/usr/src/uts/common/io/drm/drm_fops.c
@@ -1,17 +1,22 @@
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
-/* BEGIN CSTYLED */
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
-/* drm_fops.h -- File operations for DRM -*- linux-c -*-
+/*
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- */
-/*-
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,100 +37,471 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_open_helper(struct drm_minor *minor,
+ int clone_id, int flags, cred_t *credp);
+
+static __inline__ int drm_device_is_agp(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+ return 0;
+ if (dev->driver->device_is_agp != NULL) {
+ int err = (*dev->driver->device_is_agp) (dev);
+
+ if (err != 2) {
+ return err;
+ }
+ }
+
+ return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
+}
+
+static int drm_setup(struct drm_device * dev)
+{
+ int i;
+ int ret;
+ static bool first_call = true;
+
+ if (first_call) {
+ /* OSOL_drm: moved from drm_fill_in_dev */
+ if (drm_core_has_AGP(dev)) {
+ if (drm_device_is_agp(dev))
+ dev->agp = drm_agp_init(dev);
+ if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+ && (dev->agp == NULL)) {
+ DRM_ERROR("Cannot initialize the agpgart module.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (dev->driver->firstopen) {
+ ret = dev->driver->firstopen(dev);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (first_call) {
+ /* OSOL_drm: moved from drm_get_dev */
+ /* setup the grouping for the legacy output */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+ if (ret)
+ return ret;
+ }
+ }
+
+ atomic_set(&dev->ioctl_count, 0);
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ i = drm_dma_setup(dev);
+ if (i < 0)
+ return i;
+ }
+
+ for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
+ atomic_set(&dev->counts[i], 0);
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+
+
+ DRM_DEBUG("\n");
+
+ /*
+ * The kernel's context could be created here, but is now created
+ * in drm_dma_enqueue. This is more resource-efficient for
+ * hardware that does not do DMA, but may mean that
+ * drm_select_queue fails between the time the interrupt is
+ * initialized and the time the queues are initialized.
+ */
+
+ first_call = false;
+ return 0;
+}
+
+/**
+ * Open file.
*
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Daryll Strauss <daryll@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
+ * \return zero on success or a negative number on failure.
*
+ * Searches the DRM device with the same minor number, calls open_helper(), and
+ * increments the device open count. If the open count was previously zero,
+ * i.e., this is the first time the device is opened, then calls setup().
*/
+int drm_open(struct drm_minor *minor, int clone_id, int flags, cred_t *credp)
+{
+ struct drm_device *dev = minor->dev;
+ int retcode = 0;
-/* END CSTYLED */
+ DRM_DEBUG("minor->index=%d, clone_id=%d", minor->index, clone_id);
-#include "drmP.h"
+ retcode = drm_open_helper(minor, clone_id, flags, credp);
+ if (!retcode) {
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ retcode = drm_setup(dev);
+ goto out;
+ }
+ spin_unlock(&dev->count_lock);
+ }
+out:
+ return retcode;
+}
-/*ARGSUSED*/
-drm_file_t *
-drm_find_file_by_proc(drm_device_t *dev, cred_t *credp)
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and adds it into the doubly linked list in \p dev.
+ */
+static int drm_open_helper(struct drm_minor *minor,
+ int clone_id, int flags, cred_t *credp)
{
- pid_t pid = ddi_get_pid();
- drm_file_t *priv;
+ struct drm_device *dev = minor->dev;
+ struct drm_file *priv;
+ int minor_id = minor->index;
+ int ret;
+
+ if (flags & FEXCL)
+ return -EBUSY; /* No exclusive opens */
+
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
- TAILQ_FOREACH(priv, &dev->files, link)
- if (priv->pid == pid)
- return (priv);
- return (NULL);
+ DRM_DEBUG("pid = %d, minor = %d\n", ddi_get_pid(), minor_id);
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ (void) memset(priv, 0, sizeof(*priv));
+ (void) idr_replace(&minor->clone_idr, priv, clone_id); /* OSOL_drm */
+ priv->uid = crgetsuid(credp);
+ priv->pid = ddi_get_pid();
+ priv->minor = minor;
+ priv->ioctl_count = 0;
+ /* for compatibility root is always authenticated */
+ priv->authenticated = DRM_SUSER(credp);
+ priv->lock_count = 0;
+
+ INIT_LIST_HEAD(&priv->lhead);
+ INIT_LIST_HEAD(&priv->fbs);
+ mutex_init(&priv->fbs_lock, NULL, MUTEX_DRIVER, NULL);
+ INIT_LIST_HEAD(&priv->event_list);
+ DRM_INIT_WAITQUEUE(&priv->event_wait, DRM_INTR_PRI(dev));
+ priv->event_space = 4096; /* set aside 4k for event buffer */
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_open(dev, priv);
+
+ if (dev->driver->open) {
+ ret = dev->driver->open(dev, priv);
+ if (ret < 0)
+ goto out_free;
+ }
+
+
+ /* if there is no current master make this fd it */
+ mutex_lock(&dev->struct_mutex);
+ if (!priv->minor->master) {
+ /* create a new master */
+ priv->minor->master = drm_master_create(priv->minor);
+ if (!priv->minor->master) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ priv->is_master = 1;
+ /* take another reference for the copy in the local file priv */
+ priv->master = drm_master_get(priv->minor->master);
+
+ priv->authenticated = 1;
+
+ mutex_unlock(&dev->struct_mutex);
+ if (dev->driver->master_create) {
+ ret = dev->driver->master_create(dev, priv->master);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+ mutex_unlock(&dev->struct_mutex);
+ goto out_free;
+ }
+ }
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, priv, true);
+ if (ret) {
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+ mutex_unlock(&dev->struct_mutex);
+ goto out_free;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ } else {
+ /* get a reference to the master */
+ priv->master = drm_master_get(priv->minor->master);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ list_add(&priv->lhead, &dev->filelist, (caddr_t)priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+out_free:
+ kfree(priv, sizeof (*priv));
+ return ret;
}
+void drm_master_release(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct drm_master *master = fpriv->master;
+
+ if (drm_i_have_hw_lock(dev, fpriv)) {
+ DRM_DEBUG("Process %d dead, freeing lock for context %d",
+ DRM_CURRENTPID, _DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock));
+ (void) drm_lock_free(&master->lock,
+ _DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock));
+ }
+
+}
-drm_cminor_t *
-drm_find_file_by_minor(drm_device_t *dev, int minor)
+static void drm_events_release(struct drm_file *file_priv)
{
- drm_cminor_t *mp;
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e, *et;
+ struct drm_pending_vblank_event *v, *vt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
- TAILQ_FOREACH(mp, &dev->minordevs, link) {
- if (mp->minor == minor)
- return (mp);
+ /* Remove pending flips */
+ list_for_each_entry_safe(v, vt, struct drm_pending_vblank_event, &dev->vblank_event_list, base.link)
+ if (v->base.file_priv == file_priv) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base, sizeof(struct drm_pending_vblank_event));
}
- return (NULL);
+
+ /* Remove unconsumed events */
+ list_for_each_entry_safe(e, et, struct drm_pending_event, &file_priv->event_list, link)
+ e->destroy(e, sizeof(struct drm_pending_vblank_event));
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-/* drm_open_helper is called whenever a process opens /dev/drm. */
-/*ARGSUSED*/
+/**
+ * Release file.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
int
-drm_open_helper(drm_device_t *dev, drm_cminor_t *mp, int flags,
- int otyp, cred_t *credp)
+drm_release(struct drm_file *file_priv)
{
- drm_file_t *priv;
- pid_t pid;
- int retcode;
+ struct drm_device *dev = file_priv->minor->dev;
+ int retcode = 0;
- if (flags & FEXCL)
- return (EBUSY); /* No exclusive opens */
- dev->flags = flags;
- pid = ddi_get_pid();
- DRM_DEBUG("drm_open_helper :pid = %d", pid);
- DRM_LOCK();
- priv = drm_find_file_by_proc(dev, credp);
- if (priv) {
- priv->refs++;
- } else {
- priv = drm_alloc(sizeof (*priv), DRM_MEM_FILES);
- if (priv == NULL) {
- DRM_UNLOCK();
- return (ENOMEM);
- }
- bzero(priv, sizeof (*priv));
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, file_priv);
+
+ /* ========================================================
+ * Begin inline drm_release
+ */
+
+ /* Release any auth tokens that might point to this file_priv,
+ (do that under the drm_global_mutex) */
+ if (file_priv->magic)
+ (void) drm_remove_magic(file_priv->master, file_priv->magic);
+
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master)
+ drm_master_release(dev, file_priv);
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ drm_core_reclaim_buffers(dev, file_priv);
+
+ drm_events_release(file_priv);
- priv->uid = crgetsuid(credp);
- priv->pid = pid;
+ if (dev->driver->driver_features & DRIVER_MODESET)
+ drm_fb_release(file_priv);
- priv->refs = 1;
- priv->minor = 5; /* just for hack */
- priv->ioctl_count = 0;
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
- /* for compatibility root is always authenticated */
- priv->authenticated = DRM_SUSER(credp);
+ mutex_lock(&dev->ctxlist_mutex);
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
- if (dev->driver->use_gem == 1)
- drm_gem_open(priv);
+ list_for_each_entry_safe(pos, n, struct drm_ctx_list, &dev->ctxlist, head) {
+ if (pos->tag == file_priv &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev,
+ pos->handle);
- if (dev->driver->open) {
- retcode = dev->driver->open(dev, priv);
- if (retcode != 0) {
- drm_free(priv, sizeof (*priv), DRM_MEM_FILES);
- DRM_UNLOCK();
- return (retcode);
+ drm_ctxbitmap_free(dev, pos->handle);
+
+ list_del(&pos->head);
+ kfree(pos, sizeof (*pos));
+ --dev->ctx_count;
}
}
+ }
+ mutex_unlock(&dev->ctxlist_mutex);
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (file_priv->is_master) {
+ struct drm_master *master = file_priv->master;
+ struct drm_file *temp;
+ list_for_each_entry(temp, struct drm_file, &dev->filelist, lhead) {
+ if ((temp->master == file_priv->master) &&
+ (temp != file_priv))
+ temp->authenticated = 0;
+ }
+
+ /**
+ * Since the master is disappearing, so is the
+ * possibility to lock.
+ */
+
+ if (master->lock.hw_lock) {
+ master->lock.hw_lock = NULL;
+ master->lock.file_priv = NULL;
+ }
- /* first opener automatically becomes master */
- priv->master = TAILQ_EMPTY(&dev->files);
- TAILQ_INSERT_TAIL(&dev->files, priv, link);
+ if (file_priv->minor->master == file_priv->master) {
+ /* drop the reference held by the minor */
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, true);
+ drm_master_put(&file_priv->minor->master);
+ }
+ }
+
+ /* drop the reference held by the file priv */
+ drm_master_put(&file_priv->master);
+ file_priv->is_master = 0;
+ list_del(&file_priv->lhead);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, file_priv);
+ kfree(file_priv, sizeof (*file_priv));
+
+ /* ========================================================
+ * End inline drm_release
+ */
+
+ atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count)) {
+ DRM_ERROR("Device busy: %d\n",
+ atomic_read(&dev->ioctl_count));
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ return drm_lastclose(dev);
+ }
+ spin_unlock(&dev->count_lock);
+
+ return retcode;
+}
+
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+ size_t total, size_t max, struct drm_pending_event **out)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ *out = NULL;
+ if (list_empty(&file_priv->event_list))
+ goto out;
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ if (e->event->length + total > max)
+ goto out;
+
+ file_priv->event_space += e->event->length;
+ list_del(&e->link);
+ *out = e;
+ ret = true;
+
+out:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
+ssize_t drm_read(struct drm_file *file_priv, struct uio *uiop)
+{
+ struct drm_pending_event *e;
+ size_t total;
+ int ret = 0;
+
+ DRM_WAIT(ret, &file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ if (ret < 0) {
+ DRM_ERROR("returns %d event_list is %d", ret, list_empty(&file_priv->event_list));
+ return ret;
+ }
+
+ total = 0;
+ while (drm_dequeue_event(file_priv, total, uiop->uio_iov->iov_len, &e)) {
+ ret = uiomove((caddr_t)e->event, e->event->length, UIO_READ, uiop);
+ if (ret) {
+ DRM_ERROR("Failed to copy to user: %d", ret);
+ return (0);
+ }
+ total += e->event->length;
+ e->destroy(e, sizeof(struct drm_pending_vblank_event));
+ }
+
+ return total;
+}
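+
+/*
+ * Illustrative note (not from the original import): each record copied out
+ * above starts with a struct drm_event header, so a consumer walks the
+ * returned buffer by e->type / e->length; a DRM_EVENT_VBLANK record, for
+ * instance, carries a struct drm_event_vblank with sequence and timestamp.
+ */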
+
+short drm_poll(struct drm_file *file_priv, short events)
+{
+ short revent = 0;
+
+ if (!list_empty(&file_priv->event_list)) {
+ if (events & POLLIN)
+ revent |= POLLIN;
+ if (events & POLLRDNORM)
+ revent |= POLLRDNORM;
}
- mp->fpriv = priv;
- DRM_UNLOCK();
- return (0);
+
+ return revent;
}
+
+
diff --git a/usr/src/uts/common/io/drm/drm_gem.c b/usr/src/uts/common/io/drm/drm_gem.c
index 69c5fc1..0c5651c 100644
--- a/usr/src/uts/common/io/drm/drm_gem.c
+++ b/usr/src/uts/common/io/drm/drm_gem.c
@@ -1,5 +1,9 @@
/*
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,27 +30,10 @@
*
*/
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <vm/anon.h>
-#include <vm/seg_kmem.h>
-#include <vm/seg_kp.h>
-#include <vm/seg_map.h>
-#include <sys/fcntl.h>
-#include <sys/vnode.h>
-#include <sys/file.h>
-#include <sys/bitmap.h>
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <gfx_private.h>
#include "drmP.h"
-#include "drm.h"
+#include <vm/seg_kmem.h>
-/*
- * @file drm_gem.c
+/** @file drm_gem.c
*
* This file provides some of the base ioctls and library routines for
* the graphics memory manager implemented by each device driver.
@@ -67,241 +54,120 @@
* - Inability to allocate high fds will aggravate the X Server's select()
* handling, and likely that of many GL client applications as well.
*
- * This led to a plan of using our own integer IDs(called handles, following
+ * This led to a plan of using our own integer IDs (called handles, following
* DRM terminology) to mimic fds, and implement the fd syscalls we need as
* ioctls. The objects themselves will still include the struct file so
* that we can transition to fds if the required kernel infrastructure shows
* up at a later date, and as our interface with shmfs for memory allocation.
*/
-void
-idr_list_init(struct idr_list *head)
-{
- struct idr_list *entry;
- /* HASH for accelerate */
- entry = kmem_zalloc(DRM_GEM_OBJIDR_HASHNODE
- * sizeof (struct idr_list), KM_SLEEP);
- head->next = entry;
- for (int i = 0; i < DRM_GEM_OBJIDR_HASHNODE; i++) {
- INIT_LIST_HEAD(&entry[i]);
- }
-}
-
-int
-idr_list_get_new_above(struct idr_list *head,
- struct drm_gem_object *obj,
- int *handlep)
-{
- struct idr_list *entry;
- int key;
- entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
- key = obj->name % DRM_GEM_OBJIDR_HASHNODE;
- list_add(entry, &head->next[key], NULL);
- entry->obj = obj;
- entry->handle = obj->name;
- *handlep = obj->name;
- return (0);
-}
-
-struct drm_gem_object *
-idr_list_find(struct idr_list *head,
- uint32_t name)
-{
- struct idr_list *entry;
- int key;
- key = name % DRM_GEM_OBJIDR_HASHNODE;
-
- list_for_each(entry, &head->next[key]) {
- if (entry->handle == name)
- return (entry->obj);
- }
- return (NULL);
-}
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-int
-idr_list_remove(struct idr_list *head,
- uint32_t name)
-{
- struct idr_list *entry, *temp;
- int key;
- key = name % DRM_GEM_OBJIDR_HASHNODE;
- list_for_each_safe(entry, temp, &head->next[key]) {
- if (entry->handle == name) {
- list_del(entry);
- kmem_free(entry, sizeof (*entry));
- return (0);
- }
- }
- DRM_ERROR("Failed to remove the object %d", name);
- return (-1);
-}
+int drm_use_mem_pool = 0;
+/* memory pool is used for all platforms now */
+#define HAS_MEM_POOL(gen) ((gen > 30) && (drm_use_mem_pool))
-void
-idr_list_free(struct idr_list *head)
-{
- struct idr_list *entry, *temp;
- for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
- list_for_each_safe(entry, temp, &head->next[key]) {
- list_del(entry);
- kmem_free(entry, sizeof (*entry));
- }
- }
- kmem_free(head->next,
- DRM_GEM_OBJIDR_HASHNODE * sizeof (struct idr_list));
- head->next = NULL;
-}
+/**
+ * Initialize the GEM device fields
+ */
int
-idr_list_empty(struct idr_list *head)
+drm_gem_init(struct drm_device *dev)
{
- int empty;
- for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
- empty = list_empty(&(head)->next[key]);
- if (!empty)
- return (empty);
- }
- return (1);
-}
-
-static uint32_t shfile_name = 0;
-#define SHFILE_NAME_MAX 0xffffffff
-/*
- * will be set to 1 for 32 bit x86 systems only, in startup.c
- */
-extern int segkp_fromheap;
-extern ulong_t *segkp_bitmap;
-
-void
-drm_gem_object_reference(struct drm_gem_object *obj)
-{
- atomic_inc(&obj->refcount);
-}
+ spin_lock_init(&dev->object_name_lock);
+ idr_list_init(&dev->object_name_idr);
-void
-drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- if (obj == NULL)
- return;
+ gfxp_mempool_init();
- atomic_sub(1, &obj->refcount);
- if (obj->refcount == 0)
- drm_gem_object_free(obj);
+ return 0;
}
void
-drm_gem_object_handle_reference(struct drm_gem_object *obj)
+/* LINTED */
+drm_gem_destroy(struct drm_device *dev)
{
- drm_gem_object_reference(obj);
- atomic_inc(&obj->handlecount);
}
-void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
+static void
+drm_gem_object_free_internal(struct drm_gem_object *obj, int gen)
{
- if (obj == NULL)
- return;
-
- /*
- * Must bump handle count first as this may be the last
- * ref, in which case the object would disappear before we
- * checked for a name
- */
- atomic_sub(1, &obj->handlecount);
- if (obj->handlecount == 0)
- drm_gem_object_handle_free(obj);
- drm_gem_object_unreference(obj);
+ if (obj->pfnarray != NULL)
+ kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));
+ if (HAS_MEM_POOL(gen)) {
+ gfxp_free_mempool(&obj->mempool_cookie, obj->kaddr, obj->real_size);
+ } else {
+ (void) ddi_dma_unbind_handle(obj->dma_hdl);
+ ddi_dma_mem_free(&obj->acc_hdl);
+ ddi_dma_free_handle(&obj->dma_hdl);
+ }
+ obj->kaddr = NULL;
}
-/*
- * Initialize the GEM device fields
- */
-
-int
-drm_gem_init(struct drm_device *dev)
-{
- mutex_init(&dev->object_name_lock, NULL, MUTEX_DRIVER, NULL);
- idr_list_init(&dev->object_name_idr);
-
- atomic_set(&dev->object_count, 0);
- atomic_set(&dev->object_memory, 0);
- atomic_set(&dev->pin_count, 0);
- atomic_set(&dev->pin_memory, 0);
- atomic_set(&dev->gtt_count, 0);
- atomic_set(&dev->gtt_memory, 0);
- return (0);
-}
+static ddi_dma_attr_t old_dma_attr = {
+ DMA_ATTR_V0,
+ 0xff000U, /* dma_attr_addr_lo */
+ 0xffffffffU, /* dma_attr_addr_hi */
+ 0xffffffffU, /* dma_attr_count_max */
+ 4096, /* dma_attr_align */
+ 0x1fffU, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffU, /* dma_attr_maxxfer */
+ 0xffffffffU, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen, variable */
+ 4, /* dma_attr_granular */
+ DDI_DMA_FLAGERR, /* dma_attr_flags */
+};
+
+static ddi_device_acc_attr_t old_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_MERGING_OK_ACC,
+ DDI_FLAGERR_ACC
+};
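
The two static attribute structures above drive the non-pool allocation path: old_dma_attr accepts physical addresses from 0xff000 up to 4 GB, requires 4 KB alignment, and sets DDI_DMA_FLAGERR so DMA errors are reported through the I/O fault framework; old_acc_attr requests a non-byte-swapped, merge-friendly, error-reporting access handle. dma_attr_sgllen is marked "variable" because the allocator overwrites it with the page count of each request before use. A hedged sketch of the first step they feed into (dip and len are assumed):

    /* sketch: allocate a DMA handle with the attributes above; error handling elided */
    ddi_dma_handle_t hdl;
    old_dma_attr.dma_attr_sgllen = (int)btopr(len);	/* at most one cookie per page */
    if (ddi_dma_alloc_handle(dip, &old_dma_attr, DDI_DMA_DONTWAIT, NULL, &hdl) != DDI_SUCCESS)
        return (-1);
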
-/*
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
+static int
+drm_gem_object_alloc_internal_normal(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size, int flag)
{
- static ddi_dma_attr_t dma_attr = {
- DMA_ATTR_V0,
- 0U, /* dma_attr_addr_lo */
- 0xffffffffU, /* dma_attr_addr_hi */
- 0xffffffffU, /* dma_attr_count_max */
- 4096, /* dma_attr_align */
- 0x1fffU, /* dma_attr_burstsizes */
- 1, /* dma_attr_minxfer */
- 0xffffffffU, /* dma_attr_maxxfer */
- 0xffffffffU, /* dma_attr_seg */
- 1, /* dma_attr_sgllen, variable */
- 4, /* dma_attr_granular */
- 0 /* dma_attr_flags */
- };
- static ddi_device_acc_attr_t acc_attr = {
- DDI_DEVICE_ATTR_V0,
- DDI_NEVERSWAP_ACC,
- DDI_MERGING_OK_ACC
- };
- struct drm_gem_object *obj;
ddi_dma_cookie_t cookie;
uint_t cookie_cnt;
- drm_local_map_t *map;
-
pgcnt_t real_pgcnt, pgcnt = btopr(size);
- uint32_t paddr, cookie_end;
+ uint64_t paddr, cookie_end;
int i, n;
+ int (*cb)(caddr_t);
+ ddi_device_acc_attr_t *acc_attr;
+ ddi_dma_attr_t* dma_attr;
+ uint_t mode_flag;
- obj = kmem_zalloc(sizeof (struct drm_gem_object), KM_NOSLEEP);
- if (obj == NULL)
- return (NULL);
+ acc_attr = &old_acc_attr;
+ dma_attr = &old_dma_attr;
+ mode_flag = IOMEM_DATA_UC_WR_COMBINE;
- obj->dev = dev;
- obj->flink = 0;
- obj->size = size;
+ cb = (flag == 0) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
+ dma_attr->dma_attr_sgllen = (int)pgcnt;
- if (shfile_name == SHFILE_NAME_MAX) {
- DRM_ERROR("No name space for object");
+ if (ddi_dma_alloc_handle(dev->devinfo, dma_attr,
+ cb, NULL, &obj->dma_hdl)) {
+ DRM_ERROR("ddi_dma_alloc_handle failed");
goto err1;
- } else {
- obj->name = ++shfile_name;
}
-
- dma_attr.dma_attr_sgllen = (int)pgcnt;
-
- if (ddi_dma_alloc_handle(dev->dip, &dma_attr,
- DDI_DMA_DONTWAIT, NULL, &obj->dma_hdl)) {
- DRM_ERROR("drm_gem_object_alloc: "
- "ddi_dma_alloc_handle failed");
- goto err1;
- }
- if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), &acc_attr,
- IOMEM_DATA_UC_WR_COMBINE, DDI_DMA_DONTWAIT, NULL,
+ if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), acc_attr,
+ mode_flag, cb, NULL,
&obj->kaddr, &obj->real_size, &obj->acc_hdl)) {
- DRM_ERROR("drm_gem_object_alloc: "
- "ddi_dma_mem_alloc failed");
+ DRM_ERROR("ddi_dma_mem_alloc failed");
goto err2;
}
if (ddi_dma_addr_bind_handle(obj->dma_hdl, NULL,
obj->kaddr, obj->real_size, DDI_DMA_RDWR,
- DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_cnt)
+ cb, NULL, &cookie, &cookie_cnt)
!= DDI_DMA_MAPPED) {
- DRM_ERROR("drm_gem_object_alloc: "
- "ddi_dma_addr_bind_handle failed");
+ DRM_ERROR("ddi_dma_addr_bind_handle failed");
goto err3;
}
@@ -309,83 +175,232 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
obj->pfnarray = kmem_zalloc(real_pgcnt * sizeof (pfn_t), KM_NOSLEEP);
if (obj->pfnarray == NULL) {
+ DRM_DEBUG("pfnarray == NULL");
goto err4;
}
+
for (n = 0, i = 1; ; i++) {
- for (paddr = cookie.dmac_address,
- cookie_end = cookie.dmac_address + cookie.dmac_size;
+ for (paddr = cookie.dmac_laddress,
+ cookie_end = cookie.dmac_laddress + cookie.dmac_size;
paddr < cookie_end;
paddr += PAGESIZE) {
obj->pfnarray[n++] = btop(paddr);
if (n >= real_pgcnt)
- goto addmap;
+ return (0);
}
if (i >= cookie_cnt)
break;
ddi_dma_nextcookie(obj->dma_hdl, &cookie);
}
-addmap:
+err4:
+ (void) ddi_dma_unbind_handle(obj->dma_hdl);
+err3:
+ ddi_dma_mem_free(&obj->acc_hdl);
+err2:
+ ddi_dma_free_handle(&obj->dma_hdl);
+err1:
+ return (-1);
+
+}
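
drm_gem_object_alloc_internal_normal() is the canonical three-step DDI DMA sequence: allocate a handle, allocate memory against it, bind the memory, then flatten the resulting cookie list into one pfn per page. Note the switch from dmac_address to dmac_laddress in the hunk above; the old 32-bit field would truncate physical addresses beyond 4 GB. The cookie walk in isolation, as a sketch with assumed names:

    /* sketch: derive a per-page pfn array from a bound handle's cookies */
    for (n = 0, i = 1; ; i++) {
        uint64_t pa, end;
        for (pa = cookie.dmac_laddress, end = pa + cookie.dmac_size;
            pa < end; pa += PAGESIZE)
            pfns[n++] = btop(pa);	/* physical address -> page frame number */
        if (i >= cookie_cnt)
            break;
        ddi_dma_nextcookie(hdl, &cookie);	/* advance to the next segment */
    }
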
+
+/* Alloc GEM object by memory pool */
+static int
+drm_gem_object_alloc_internal_mempool(struct drm_gem_object *obj,
+ size_t size, int flag)
+{
+ int ret;
+ pgcnt_t pgcnt = btopr(size);
+
+ obj->pfnarray = kmem_zalloc(pgcnt * sizeof (pfn_t), KM_NOSLEEP);
+ if (obj->pfnarray == NULL) {
+		DRM_ERROR("Failed to allocate pfnarray");
+ return (-1);
+ }
+
+ ret = gfxp_alloc_from_mempool(&obj->mempool_cookie, &obj->kaddr,
+ obj->pfnarray, pgcnt, flag);
+ if (ret) {
+ DRM_ERROR("Failed to alloc pages from memory pool");
+ kmem_free(obj->pfnarray, pgcnt * sizeof (pfn_t));
+ return (-1);
+ }
+
+ obj->real_size = size;
+ return (0);
+}
+
+static int
+drm_gem_object_internal(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size, int gen)
+{
+ pfn_t tmp_pfn;
+ int ret, num = 0;
+
+alloc_again:
+ if (HAS_MEM_POOL(gen)) {
+ uint32_t mode;
+ if (gen >= 60)
+ mode = GFXP_MEMORY_CACHED;
+ else
+ mode = GFXP_MEMORY_WRITECOMBINED;
+ ret = drm_gem_object_alloc_internal_mempool(obj, size, mode);
+ if (ret)
+ return (-1);
+ } else {
+ ret = drm_gem_object_alloc_internal_normal(dev, obj, size, 0);
+ if (ret)
+ return (-1);
+ }
+ tmp_pfn = hat_getpfnum(kas.a_hat, obj->kaddr);
+ if (tmp_pfn != obj->pfnarray[0]) {
+ DRM_ERROR("obj %p map incorrect 0x%lx != 0x%lx",
+ (void *)obj, tmp_pfn, obj->pfnarray[0]);
+ drm_gem_object_free_internal(obj, gen);
+ udelay(150);
+
+ if (num++ < 5)
+ goto alloc_again;
+ else
+ return (-1);
+ }
+
+ return (0);
+}
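
drm_gem_object_internal() does not trust the allocation it just made: hat_getpfnum() translates the kernel virtual address through the HAT, and the result must match the first entry of the pfn array. On a mismatch the backing store is freed, the code waits 150 microseconds, and the allocation is retried, giving up after five attempts. The verify-and-retry idiom, sketched with hypothetical helpers:

    int tries = 0;
retry:
    if (alloc_backing(obj) != 0)		/* hypothetical: either path above */
        return (-1);
    if (hat_getpfnum(kas.a_hat, obj->kaddr) != obj->pfnarray[0]) {
        free_backing(obj);			/* hypothetical teardown */
        udelay(150);
        if (++tries < 5)
            goto retry;
        return (-1);
    }
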
+/*
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int
+drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size, int gen)
+{
+ drm_local_map_t *map;
+ int ret;
+
+ if (size == 0) {
+ DRM_DEBUG("size == 0");
+ return (-1);
+ }
+
+ obj->dev = dev;
+ obj->size = size;
+
+ ret = drm_gem_object_internal(dev, obj, size, gen);
+ if (ret)
+ return (-1);
+
map = drm_alloc(sizeof (struct drm_local_map), DRM_MEM_MAPS);
if (map == NULL) {
+ DRM_DEBUG("map == NULL");
goto err5;
}
map->handle = obj;
map->offset = (uintptr_t)map->handle;
map->offset &= 0xffffffffUL;
- map->dev_addr = map->handle;
map->size = obj->real_size;
- map->type = _DRM_TTM;
+ map->type = _DRM_GEM;
+ map->callback = 0;
map->flags = _DRM_WRITE_COMBINING | _DRM_REMOVABLE;
- map->drm_umem_cookie =
+ map->umem_cookie =
gfxp_umem_cookie_init(obj->kaddr, obj->real_size);
- if (map->drm_umem_cookie == NULL) {
+ if (map->umem_cookie == NULL) {
+ DRM_DEBUG("umem_cookie == NULL");
goto err6;
}
- obj->map = map;
-
- atomic_set(&obj->refcount, 1);
- atomic_set(&obj->handlecount, 1);
- if (dev->driver->gem_init_object != NULL &&
- dev->driver->gem_init_object(obj) != 0) {
+ obj->maplist.map = map;
+ if (drm_map_handle(dev, &obj->maplist)) {
+ DRM_DEBUG("drm_map_handle failed");
goto err7;
}
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
- return (obj);
+ kref_init(&obj->refcount);
+ atomic_set(&obj->handle_count, 0);
+
+ if (MDB_TRACK_ENABLE) {
+ INIT_LIST_HEAD(&obj->track_list);
+ spin_lock(&dev->track_lock);
+ list_add_tail(&obj->track_list, &dev->gem_objects_list, (caddr_t)obj);
+ spin_unlock(&dev->track_lock);
+
+ INIT_LIST_HEAD(&obj->his_list);
+ drm_gem_object_track(obj, "obj init", 0, 0, NULL);
+ }
+
+ INIT_LIST_HEAD(&obj->seg_list);
+
+ return (0);
err7:
- gfxp_umem_cookie_destroy(map->drm_umem_cookie);
+ gfxp_umem_cookie_destroy(map->umem_cookie);
err6:
drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
err5:
- kmem_free(obj->pfnarray, real_pgcnt * sizeof (pfn_t));
-err4:
- (void) ddi_dma_unbind_handle(obj->dma_hdl);
-err3:
- ddi_dma_mem_free(&obj->acc_hdl);
-err2:
- ddi_dma_free_handle(&obj->dma_hdl);
-err1:
- kmem_free(obj, sizeof (struct drm_gem_object));
+ drm_gem_object_free_internal(obj, gen);
+ return (-1);
+}
- return (NULL);
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+int drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+
+ kref_init(&obj->refcount);
+ atomic_set(&obj->handle_count, 0);
+ obj->size = size;
+
+ return 0;
}
-/*
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ goto free;
+
+ if (drm_gem_object_init(dev, obj, size, 0) != 0) {
+ kmem_free(obj, sizeof (struct drm_gem_object));
+ return NULL;
+ }
+
+ if (dev->driver->gem_init_object != NULL &&
+ dev->driver->gem_init_object(obj) != 0) {
+ goto fput;
+ }
+ return obj;
+fput:
+ /* Object_init mangles the global counters - readjust them. */
+ drm_gem_object_release(obj);
+ kfree(obj, sizeof(*obj));
+free:
+ return NULL;
+}
+
+/**
* Removes the mapping from handle to filp for this object.
*/
-static int
-drm_gem_handle_delete(struct drm_file *filp, int handle)
+int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
struct drm_gem_object *obj;
- int err;
- /*
- * This is gross. The idr system doesn't let us try a delete and
+
+ /* This is gross. The idr system doesn't let us try a delete and
* return an error code. It just spews if you fail at deleting.
* So, we have to grab a lock around finding the object and then
* doing the delete on it and dropping the refcount, or the user
@@ -400,34 +415,32 @@ drm_gem_handle_delete(struct drm_file *filp, int handle)
obj = idr_list_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
- DRM_ERROR("obj %d is not in tne list, failed to close", handle);
- return (EINVAL);
+ return -EINVAL;
}
dev = obj->dev;
/* Release reference and decrement refcount. */
- err = idr_list_remove(&filp->object_idr, handle);
- if (err == -1)
- DRM_ERROR("%s", __func__);
-
+ (void) idr_list_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- spin_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- return (0);
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, filp);
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
}
-/*
+/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- int *handlep)
+ struct drm_gem_object *obj,
+ u32 *handlep)
{
+ struct drm_device *dev = obj->dev;
int ret;
/*
@@ -435,27 +448,37 @@ drm_gem_handle_create(struct drm_file *file_priv,
*/
again:
/* ensure there is space available to allocate a handle */
+ if (idr_list_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
/* do the allocation under our spinlock */
spin_lock(&file_priv->table_lock);
- ret = idr_list_get_new_above(&file_priv->object_idr, obj, handlep);
+ ret = idr_list_get_new_above(&file_priv->object_idr, (void *)obj, (int *)handlep);
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
- if (ret != 0) {
- DRM_ERROR("Failed to create handle");
- return (ret);
- }
+ if (ret != 0)
+ return ret;
drm_gem_object_handle_reference(obj);
- return (0);
+
+ if (dev->driver->gem_open_object) {
+ ret = dev->driver->gem_open_object(obj, file_priv);
+ if (ret) {
+ (void) drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+ }
+
+ return 0;
}
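
The again: loop above is the classic two-phase idr pattern: idr_list_pre_get() may sleep, so it runs outside the spinlock and merely pre-reserves memory, while the actual idr_list_get_new_above() runs atomically under file_priv->table_lock. If a racing thread consumed the reservation first, the allocator reports -EAGAIN and the caller simply reserves again. Stripped to its essentials:

    u32 handle;
again:
    if (idr_list_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
        return -ENOMEM;				/* reservation; may sleep */
    spin_lock(&file_priv->table_lock);
    ret = idr_list_get_new_above(&file_priv->object_idr, (void *)obj, (int *)&handle);
    spin_unlock(&file_priv->table_lock);	/* allocation; atomic */
    if (ret == -EAGAIN)
        goto again;
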
-/* Returns a reference to the object named by the handle. */
+/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *filp,
- int handle)
+/* LINTED */
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ u32 handle)
{
struct drm_gem_object *obj;
@@ -463,210 +486,237 @@ drm_gem_object_lookup(struct drm_file *filp,
/* Check if we currently have a reference on the object */
obj = idr_list_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- DRM_ERROR("object_lookup failed, handle %d", handle);
- return (NULL);
- }
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return NULL;
+ }
drm_gem_object_reference(obj);
spin_unlock(&filp->table_lock);
- return (obj);
+ return obj;
}
-/*
+/**
* Releases the handle to an mm object.
*/
-/*ARGSUSED*/
int
+/* LINTED */
drm_gem_close_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- struct drm_gem_close args;
+ struct drm_gem_close *args = data;
int ret;
- if (!(dev->driver->use_gem == 1))
- return (ENODEV);
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (void *)data, sizeof (args));
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
- ret = drm_gem_handle_delete(fpriv, args.handle);
+ ret = drm_gem_handle_delete(file, args->handle);
- return (ret);
+ return ret;
}
-/*
+/**
* Create a global name for an object, returning the name.
*
 * The name table holds a reference on the object (taken below when
 * the name is first created) and drops it again when the last handle
 * closes and the name is removed.
*/
-/*ARGSUSED*/
int
+/* LINTED */
drm_gem_flink_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- struct drm_gem_flink args;
+ struct drm_gem_flink *args = data;
struct drm_gem_object *obj;
- int ret, handle;
+ int ret;
- if (!(dev->driver->use_gem == 1))
- return (ENODEV);
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
- DRM_COPYFROM_WITH_RETURN(&args,
- (void *)data, sizeof (args));
- obj = drm_gem_object_lookup(fpriv, args.handle);
+ obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
- return (EINVAL);
- handle = args.handle;
+ return -ENOENT;
+
+again:
+ if (idr_list_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
spin_lock(&dev->object_name_lock);
- if (!obj->flink) {
- /* only creat a node in object_name_idr, no update anything */
- ret = idr_list_get_new_above(&dev->object_name_idr,
- obj, &handle);
- obj->flink = obj->name;
+ if (!obj->name) {
+ ret = idr_list_get_new_above(&dev->object_name_idr, (void *) obj,
+ &obj->name);
+ args->name = (uint64_t) obj->name;
+ spin_unlock(&dev->object_name_lock);
+
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0)
+ goto err;
+
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
+ } else {
+ args->name = (uint64_t) obj->name;
+ spin_unlock(&dev->object_name_lock);
+ ret = 0;
}
- /*
- * Leave the reference from the lookup around as the
- * name table now holds one
- */
- args.name = obj->name;
- spin_unlock(&dev->object_name_lock);
- ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
- if (ret != 0)
- DRM_ERROR(" gem flink error! %d", ret);
-
- spin_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
-
- return (ret);
+err:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
}
-/*
+/**
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
-/*ARGSUSED*/
int
+/* LINTED */
drm_gem_open_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- struct drm_gem_open args;
+ struct drm_gem_open *args = data;
struct drm_gem_object *obj;
int ret;
- int handle;
+ u32 handle;
- if (!(dev->driver->use_gem == 1)) {
- DRM_ERROR("Not support GEM");
- return (ENODEV);
- }
- DRM_COPYFROM_WITH_RETURN(&args,
- (void *) data, sizeof (args));
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
spin_lock(&dev->object_name_lock);
-
- obj = idr_list_find(&dev->object_name_idr, args.name);
-
+ obj = idr_list_find(&dev->object_name_idr, (int) args->name);
if (obj)
drm_gem_object_reference(obj);
spin_unlock(&dev->object_name_lock);
- if (!obj) {
- DRM_ERROR("Can't find the obj %d", args.name);
- return (ENOENT);
- }
+ if (!obj)
+ return -ENOENT;
- ret = drm_gem_handle_create(fpriv, obj, &handle);
- spin_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
+ ret = drm_gem_handle_create(file, obj, &handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
+ return ret;
- args.handle = args.name;
- args.size = obj->size;
+ args->handle = handle;
+ args->size = obj->size;
- ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
- if (ret != 0)
- DRM_ERROR(" gem open error! %d", ret);
- return (ret);
+ return 0;
}
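
Taken together, flink and open let two clients share a single GEM object through a global name. A hedged userspace sketch, assuming fd_a and fd_b are open DRM file descriptors, handle is a valid GEM handle on fd_a, and the structs and ioctl numbers come from libdrm's drm.h:

    struct drm_gem_flink flink = { .handle = handle };
    if (ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink) == 0) {
        struct drm_gem_open op = { .name = flink.name };
        if (ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op) == 0) {
            /* op.handle references the same object via fd_b;
             * op.size reports the object's size */
        }
    }
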
-/*
+/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
void
-drm_gem_open(struct drm_file *file_private)
+/* LINTED */
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
idr_list_init(&file_private->object_idr);
- mutex_init(&file_private->table_lock, NULL, MUTEX_DRIVER, NULL);
+ spin_lock_init(&file_private->table_lock);
}
-/*
+/**
* Called at device close to release the file's
* handle references on objects.
*/
-static void
-drm_gem_object_release_handle(struct drm_gem_object *obj)
+static int
+/* LINTED */
+drm_gem_object_release_handle(int id, void *ptr, void *data)
{
- drm_gem_object_handle_unreference(obj);
+ struct drm_file *file_priv = data;
+ struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
}
-/*
+/**
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
+/* LINTED E_FUNC_ARG_UNUSED */
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
struct idr_list *entry;
- spin_lock(&dev->struct_mutex);
-
- idr_list_for_each(entry, &file_private->object_idr)
- drm_gem_object_release_handle(entry->obj);
+ struct drm_gem_object *obj;
+ idr_list_for_each(entry, &file_private->object_idr) {
+ obj = (struct drm_gem_object *)entry->obj;
+ (void) drm_gem_object_release_handle(obj->name, obj, (void *)file_private);
+ }
idr_list_free(&file_private->object_idr);
- spin_unlock(&dev->struct_mutex);
+}
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_local_map *map = obj->maplist.map;
+
+ if (MDB_TRACK_ENABLE) {
+ spin_lock(&dev->track_lock);
+ list_del(&obj->track_list);
+ spin_unlock(&dev->track_lock);
+
+ struct drm_history_list *r_list, *list_temp;
+ list_for_each_entry_safe(r_list, list_temp, struct drm_history_list, &obj->his_list, head) {
+ list_del(&r_list->head);
+ drm_free(r_list, sizeof (struct drm_history_list), DRM_MEM_MAPS);
+ }
+ list_del(&obj->his_list);
+ }
+
+ (void) idr_remove(&dev->map_idr, obj->maplist.user_token >> PAGE_SHIFT);
+ gfxp_umem_cookie_destroy(map->umem_cookie);
+ drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
+
+ kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));
+
+ if (obj->dma_hdl == NULL) {
+ gfxp_free_mempool(&obj->mempool_cookie, obj->kaddr, obj->real_size);
+ } else {
+ (void) ddi_dma_unbind_handle(obj->dma_hdl);
+ ddi_dma_mem_free(&obj->acc_hdl);
+ ddi_dma_free_handle(&obj->dma_hdl);
+ }
+ obj->kaddr = NULL;
}
-/*
+/**
* Called after the last reference to the object has been lost.
*
* Frees the object
*/
void
-drm_gem_object_free(struct drm_gem_object *obj)
+drm_gem_object_free(struct kref *kref)
{
+ /* LINTED */
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
- struct drm_local_map *map = obj->map;
+
+	/* BUG_ON(!mutex_is_locked(&dev->struct_mutex)); */
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
+}
- gfxp_umem_cookie_destroy(map->drm_umem_cookie);
- drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
-
- kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));
-
- (void) ddi_dma_unbind_handle(obj->dma_hdl);
- ddi_dma_mem_free(&obj->acc_hdl);
- ddi_dma_free_handle(&obj->dma_hdl);
-
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
- kmem_free(obj, sizeof (struct drm_gem_object));
+/* LINTED E_FUNC_ARG_UNUSED */
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+ BUG_ON(1);
}
-/*
+/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
@@ -676,23 +726,78 @@ drm_gem_object_free(struct drm_gem_object *obj)
void
drm_gem_object_handle_free(struct drm_gem_object *obj)
{
- int err;
struct drm_device *dev = obj->dev;
+
/* Remove any name for this object */
spin_lock(&dev->object_name_lock);
- if (obj->flink) {
- err = idr_list_remove(&dev->object_name_idr, obj->name);
- if (err == -1)
- DRM_ERROR("%s", __func__);
- obj->flink = 0;
+ if (obj->name) {
+ (void) idr_list_remove(&dev->object_name_idr, obj->name);
+ obj->name = 0;
spin_unlock(&dev->object_name_lock);
/*
* The object name held a reference to this object, drop
* that now.
+ *
+ * This cannot be the last reference, since the handle holds one too.
*/
- drm_gem_object_unreference(obj);
+ kref_put(&obj->refcount, drm_gem_object_ref_bug);
} else
-
spin_unlock(&dev->object_name_lock);
}
+
+int
+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ obj->gtt_map_kaddr = gfxp_alloc_kernel_space(obj->real_size);
+ if (obj->gtt_map_kaddr == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void
+drm_gem_mmap(struct drm_gem_object *obj, pfn_t pfn)
+{
+ gfxp_load_kernel_space(pfn, obj->real_size, GFXP_MEMORY_WRITECOMBINED, obj->gtt_map_kaddr);
+}
+
+void
+drm_gem_release_mmap(struct drm_gem_object *obj)
+{
+ gfxp_unload_kernel_space(obj->gtt_map_kaddr, obj->real_size);
+}
+
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+ struct ddi_umem_cookie *umem_cookie = obj->maplist.map->umem_cookie;
+ umem_cookie->cvaddr = obj->kaddr;
+
+ if (obj->maplist.map->gtt_mmap == 0) {
+ gfxp_free_kernel_space(obj->gtt_map_kaddr, obj->real_size);
+ DRM_DEBUG("already freed, don't free more than once!");
+ }
+
+ if (obj->maplist.map->gtt_mmap == 1) {
+ gfxp_unmap_kernel_space(obj->gtt_map_kaddr, obj->real_size);
+ obj->maplist.map->gtt_mmap = 0;
+ }
+
+ obj->gtt_map_kaddr = NULL;
+}
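
drm_gem_free_mmap_offset() unwinds two possible states, tracked by the map's gtt_mmap flag: 0 means drm_gem_create_mmap_offset() reserved a kernel VA range that drm_gem_mmap() never loaded, so only the reservation is released; 1 means translations were loaded, so the range is unmapped and the flag cleared. The life cycle implemented by the three helpers above, in sketch form:

    caddr_t va = gfxp_alloc_kernel_space(size);	/* reserve VA; gtt_mmap == 0 */
    gfxp_load_kernel_space(pfn, size, GFXP_MEMORY_WRITECOMBINED, va);	/* gtt_mmap -> 1 */
    gfxp_unmap_kernel_space(va, size);		/* tear down; gtt_mmap back to 0 */
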
+
+void
+drm_gem_object_track(struct drm_gem_object *obj, const char *name,
+ uint32_t cur_seq, uint32_t last_seq, void* ptr)
+{
+ struct drm_history_list *list;
+ list = drm_alloc(sizeof (struct drm_history_list), DRM_MEM_MAPS);
+ if (list != NULL) {
+ (void) memcpy(list->info, name, (strlen(name) * sizeof(char)));
+ list->cur_seq = cur_seq;
+ list->last_seq = last_seq;
+ list->ring_ptr = ptr;
+ list_add_tail(&list->head, &obj->his_list, (caddr_t)list);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/drm_io32.c b/usr/src/uts/common/io/drm/drm_io32.c
new file mode 100644
index 0000000..60b04c5
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_io32.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drm.h"
+#include "drmP.h"
+#include "drm_io32.h"
+
+#ifdef _MULTI_DATAMODEL
+
+int
+copyin32_drm_map(void *dest, void *src)
+{
+ struct drm_map *dest64 = dest;
+ struct drm_map_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->offset = dest32.offset;
+ dest64->size = dest32.size;
+ dest64->type = dest32.type;
+ dest64->flags = dest32.flags;
+ dest64->handle = dest32.handle;
+ dest64->mtrr = dest32.mtrr;
+
+ return (0);
+}
+
+int
+copyout32_drm_map(void *dest, void *src)
+{
+ struct drm_map *src64 = src;
+ struct drm_map_32 src32;
+
+ src32.offset = src64->offset;
+ src32.size = (uint32_t)src64->size;
+ src32.type = src64->type;
+ src32.flags = src64->flags;
+ src32.handle = src64->handle;
+ src32.mtrr = src64->mtrr;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
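
Every shim in this file follows the same shape: copy a packed ILP32 image of the structure in from userland and widen each field into the kernel's LP64 layout, or do the reverse on the way out, with 32-bit pointers carried as caddr32_t and rebuilt through (uintptr_t) casts. A sketch of how an ioctl handler might select a shim at run time, assuming the ddi_model_convert_from() test used elsewhere in this driver:

    struct drm_map map;
#ifdef _MULTI_DATAMODEL
    if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
        if (copyin32_drm_map(&map, (void *)data) != 0)
            return (EFAULT);
    } else
#endif
        DRM_COPYFROM_WITH_RETURN(&map, (void *)data, sizeof (map));
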
+
+int
+copyin32_drm_buf_desc(void *dest, void *src)
+{
+ struct drm_buf_desc *dest64 = dest;
+ struct drm_buf_desc_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->count = dest32.count;
+ dest64->size = dest32.size;
+ dest64->low_mark = dest32.low_mark;
+ dest64->high_mark = dest32.high_mark;
+ dest64->flags = dest32.flags;
+ dest64->agp_start = dest32.agp_start;
+
+ return (0);
+}
+
+int
+copyout32_drm_buf_desc(void *dest, void *src)
+{
+ struct drm_buf_desc *src64 = src;
+ struct drm_buf_desc_32 src32;
+
+ src32.count = src64->count;
+ src32.size = (uint32_t)src64->size;
+ src32.low_mark = src64->low_mark;
+ src32.high_mark = src64->high_mark;
+ src32.flags = src64->flags;
+ src32.agp_start = (uint32_t)src64->agp_start;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_buf_free(void *dest, void *src)
+{
+ struct drm_buf_free *dest64 = dest;
+ struct drm_buf_free_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->count = dest32.count;
+ dest64->list = (int *)(uintptr_t)dest32.list;
+
+ return (0);
+}
+
+int
+copyin32_drm_buf_map(void *dest, void *src)
+{
+ struct drm_buf_map *dest64 = dest;
+ struct drm_buf_map_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->count = dest32.count;
+ dest64->virtual = (void *)(uintptr_t)dest32.virtual;
+ dest64->list = (drm_buf_pub_t *)(uintptr_t)dest32.list;
+ dest64->fd = dest32.fd;
+
+ return (0);
+}
+
+int
+copyout32_drm_buf_map(void *dest, void *src)
+{
+ struct drm_buf_map *src64 = src;
+ struct drm_buf_map_32 src32;
+
+ src32.count = src64->count;
+ src32.virtual = (caddr32_t)(uintptr_t)src64->virtual;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_ctx_priv_map(void *dest, void *src)
+{
+ struct drm_ctx_priv_map *dest64 = dest;
+ struct drm_ctx_priv_map_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->ctx_id = dest32.ctx_id;
+ dest64->handle = (void *)(uintptr_t)dest32.handle;
+
+ return (0);
+}
+
+int
+copyout32_drm_ctx_priv_map(void *dest, void *src)
+{
+ struct drm_ctx_priv_map *src64 = src;
+ struct drm_ctx_priv_map_32 src32;
+
+ src32.ctx_id = src64->ctx_id;
+ src32.handle = (caddr32_t)(uintptr_t)src64->handle;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_ctx_res(void *dest, void *src)
+{
+ struct drm_ctx_res *dest64 = dest;
+ struct drm_ctx_res_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->count = dest32.count;
+ dest64->contexts = (struct drm_ctx *)(uintptr_t)dest32.contexts;
+
+ return (0);
+}
+
+int
+copyout32_drm_ctx_res(void *dest, void *src)
+{
+ struct drm_ctx_res *src64 = src;
+ struct drm_ctx_res_32 src32;
+
+ src32.count = src64->count;
+ src32.contexts = (caddr32_t)(uintptr_t)src64->contexts;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_unique(void *dest, void *src)
+{
+ struct drm_unique *dest64 = dest;
+ struct drm_unique_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->unique_len = dest32.unique_len;
+ dest64->unique = (char __user *)(uintptr_t)dest32.unique;
+
+ return (0);
+}
+
+int
+copyout32_drm_unique(void *dest, void *src)
+{
+ struct drm_unique *src64 = src;
+ struct drm_unique_32 src32;
+
+ src32.unique_len = src64->unique_len;
+ src32.unique = (caddr32_t)(uintptr_t)src64->unique;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_client(void *dest, void *src)
+{
+ struct drm_client *dest64 = dest;
+ struct drm_client_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->idx = dest32.idx;
+ dest64->auth = dest32.auth;
+ dest64->pid = dest32.pid;
+ dest64->uid = dest32.uid;
+ dest64->magic = dest32.magic;
+ dest64->iocs = dest32.iocs;
+
+ return (0);
+}
+
+int
+copyout32_drm_client(void *dest, void *src)
+{
+ struct drm_client *src64 = src;
+ struct drm_client_32 src32;
+
+ src32.idx = src64->idx;
+ src32.auth = src64->auth;
+ src32.pid = (uint32_t)src64->pid;
+ src32.uid = (uint32_t)src64->uid;
+ src32.magic = (uint32_t)src64->magic;
+ src32.iocs = (uint32_t)src64->iocs;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyout32_drm_stats(void *dest, void *src)
+{
+ struct drm_stats *src64 = src;
+ struct drm_stats_32 src32;
+ int i;
+
+ src32.count = (uint32_t)src64->count;
+ for (i = 0; i < 15; i++) {
+ src32.data[i].value = src64->data[i].value;
+ src32.data[i].type = src64->data[i].type;
+ }
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_version(void *dest, void *src)
+{
+ struct drm_version *dest64 = dest;
+ struct drm_version_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->name_len = dest32.name_len;
+ dest64->name = (char *)(uintptr_t)dest32.name;
+ dest64->date_len = dest32.date_len;
+ dest64->date = (char *)(uintptr_t)dest32.date;
+ dest64->desc_len = dest32.desc_len;
+ dest64->desc = (char *)(uintptr_t)dest32.desc;
+
+ return (0);
+}
+
+int
+copyout32_drm_version(void *dest, void *src)
+{
+ struct drm_version *src64 = src;
+ struct drm_version_32 src32;
+
+ src32.version_major = src64->version_major;
+ src32.version_minor = src64->version_minor;
+ src32.version_patchlevel = src64->version_patchlevel;
+ src32.name_len = (uint32_t)src64->name_len;
+ src32.name = (caddr32_t)(uintptr_t)src64->name;
+ src32.date_len = (uint32_t)src64->date_len;
+ src32.date = (caddr32_t)(uintptr_t)src64->date;
+ src32.desc_len = (uint32_t)src64->desc_len;
+ src32.desc = (caddr32_t)(uintptr_t)src64->desc;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_wait_vblank(void *dest, void *src)
+{
+ union drm_wait_vblank *dest64 = dest;
+ union drm_wait_vblank_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->request.type = dest32.request.type;
+ dest64->request.sequence = dest32.request.sequence;
+ dest64->request.signal = dest32.request.signal;
+
+ return (0);
+}
+
+int
+copyout32_drm_wait_vblank(void *dest, void *src)
+{
+ union drm_wait_vblank *src64 = src;
+ union drm_wait_vblank_32 src32;
+
+ src32.reply.type = src64->reply.type;
+ src32.reply.sequence = src64->reply.sequence;
+ src32.reply.tval_sec = (int32_t)src64->reply.tval_sec;
+ src32.reply.tval_usec = (int32_t)src64->reply.tval_usec;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_scatter_gather(void *dest, void *src)
+{
+ struct drm_scatter_gather *dest64 = dest;
+ struct drm_scatter_gather_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->size = dest32.size;
+ dest64->handle = dest32.handle;
+
+ return (0);
+}
+
+int
+copyout32_drm_scatter_gather(void *dest, void *src)
+{
+ struct drm_scatter_gather *src64 = src;
+ struct drm_scatter_gather_32 src32;
+
+ src32.size = (uint32_t)src64->size;
+ src32.handle = (uint32_t)src64->handle;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+#endif
diff --git a/usr/src/uts/common/io/drm/drm_ioctl.c b/usr/src/uts/common/io/drm/drm_ioctl.c
index 8d504a1..7efdf2b 100644
--- a/usr/src/uts/common/io/drm/drm_ioctl.c
+++ b/usr/src/uts/common/io/drm/drm_ioctl.c
@@ -1,8 +1,22 @@
/*
- * drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
- * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
*/
+
/*
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -25,400 +39,293 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
+#include "drm_core.h"
#include "drm_io32.h"
-/*
- * Beginning in revision 1.1 of the DRM interface, getunique will return
- * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
- * before setunique has been called. The format for the bus-specific part of
- * the unique is not defined for any other bus.
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
*/
-/*ARGSUSED*/
-int
-drm_getunique(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_getunique(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_unique_t u1;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_unique_32_t u32;
-
- DRM_COPYFROM_WITH_RETURN(&u32, (void *)data, sizeof (u32));
- u1.unique_len = u32.unique_len;
- u1.unique = (char __user *)(uintptr_t)u32.unique;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&u1, (void *)data, sizeof (u1));
-
- if (u1.unique_len >= dev->unique_len) {
- if (dev->unique_len == 0) {
- DRM_ERROR("drm_getunique: dev->unique_len = 0");
- return (EFAULT);
- }
- if (DRM_COPY_TO_USER(u1.unique, dev->unique, dev->unique_len))
- return (EFAULT);
- }
- u1.unique_len = dev->unique_len;
+ struct drm_unique *u = data;
+ struct drm_master *master = file->master;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_unique_32_t u32;
+ if (master->unique_len == 0 || master->unique == NULL) {
+ return -EFAULT;
+ }
- u32.unique_len = (uint32_t)u1.unique_len;
- u32.unique = (caddr32_t)(uintptr_t)u1.unique;
- DRM_COPYTO_WITH_RETURN((void *)data, &u32, sizeof (u32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &u1, sizeof (u1));
+ if (u->unique_len >= master->unique_len) {
+ if (u->unique == NULL) {
+ return -EINVAL;
+ }
+ if (DRM_COPY_TO_USER(u->unique, master->unique, master->unique_len))
+ return -EFAULT;
+ }
+ u->unique_len = master->unique_len;
- return (0);
+ return 0;
}
/*
* Deprecated in DRM version 1.1, and will return EBUSY when setversion has
* requested version 1.1 or greater.
*/
-/*ARGSUSED*/
-int
-drm_setunique(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_setunique(DRM_IOCTL_ARGS)
{
- return (EINVAL);
+ return -EINVAL;
}
-
-static int
-drm_set_busid(drm_device_t *dev)
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
- DRM_LOCK();
-
- if (dev->unique != NULL) {
- DRM_UNLOCK();
- return (EBUSY);
- }
-
- dev->unique_len = 20;
- dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
- if (dev->unique == NULL) {
- DRM_UNLOCK();
- return (ENOMEM);
- }
-
- (void) snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%1x",
- dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
-
- DRM_UNLOCK();
-
- return (0);
+ struct drm_master *master = file_priv->master;
+ int len;
+
+ if (master->unique != NULL)
+ return -EBUSY;
+
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%1x",
+ dev->pdev->domain, dev->pdev->bus, dev->pdev->slot, dev->pdev->func);
+ if (len >= master->unique_len)
+ DRM_ERROR("buffer overflow");
+ else
+ master->unique_len = len;
+
+ return 0;
}
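
The unique string built by drm_set_busid() is the canonical pci:dddd:bb:ss.f bus id (domain, bus, slot, function) that userspace matches when opening a device by bus id. For example, an Intel IGD at domain 0, bus 0, slot 2, function 0:

    char buf[40];
    (void) snprintf(buf, sizeof (buf), "pci:%04x:%02x:%02x.%1x", 0, 0, 2, 0);
    /* buf now holds "pci:0000:00:02.0" */
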
-/*ARGSUSED*/
-int
-drm_getmap(DRM_IOCTL_ARGS)
+/**
+ * Get mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+/* LINTED */
+int drm_getmap(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_map_t map;
- drm_local_map_t *mapinlist;
- int idx;
- int i = 0;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t map32;
-
- DRM_COPYFROM_WITH_RETURN(&map32, (void *)data, sizeof (map32));
- map.offset = map32.offset;
- map.size = map32.size;
- map.type = map32.type;
- map.flags = map32.flags;
- map.handle = map32.handle;
- map.mtrr = map32.mtrr;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&map, (void *)data, sizeof (map));
-
- idx = (int)map.offset;
-
- DRM_LOCK();
- if (idx < 0) {
- DRM_UNLOCK();
- return (EINVAL);
- }
-
- TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
+ struct drm_map *map = data;
+ struct drm_map_list *r_list = NULL;
+ struct list_head *list;
+ int idx;
+ int i;
+
+ idx = (int)map->offset;
+ if (idx < 0)
+ return -EINVAL;
+
+ i = 0;
+ mutex_lock(&dev->struct_mutex);
+ list_for_each(list, &dev->maplist) {
if (i == idx) {
- map.offset = mapinlist->offset;
- map.size = mapinlist->size;
- map.type = mapinlist->type;
- map.flags = mapinlist->flags;
- map.handle = (unsigned long long)(uintptr_t)
- mapinlist->handle;
- map.mtrr = mapinlist->mtrr;
+ r_list = list_entry(list, struct drm_map_list, head);
break;
}
i++;
}
+ if (!r_list || !r_list->map) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
- DRM_UNLOCK();
-
- if (mapinlist == NULL)
- return (EINVAL);
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t map32;
-
- map32.offset = map.offset;
- map32.size = (uint32_t)map.size;
- map32.type = map.type;
- map32.flags = map.flags;
- map32.handle = (uintptr_t)map.handle;
- map32.mtrr = map.mtrr;
- DRM_COPYTO_WITH_RETURN((void *)data, &map32, sizeof (map32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &map, sizeof (map));
+ map->offset = r_list->map->offset;
+ map->size = r_list->map->size;
+ map->type = r_list->map->type;
+ map->flags = r_list->map->flags;
+ map->handle = (unsigned long long)(uintptr_t)r_list->user_token;
+ map->mtrr = r_list->map->mtrr;
+ mutex_unlock(&dev->struct_mutex);
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_getclient(DRM_IOCTL_ARGS)
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace
+ */
+/* LINTED */
+int drm_getclient(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_client_t client;
- drm_file_t *pt;
- int idx;
- int i = 0;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_client_32_t client32;
-
- DRM_COPYFROM_WITH_RETURN(&client32, (void *)data,
- sizeof (client32));
- client.idx = client32.idx;
- client.auth = client32.auth;
- client.pid = client32.pid;
- client.uid = client32.uid;
- client.magic = client32.magic;
- client.iocs = client32.iocs;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&client, (void *)data,
- sizeof (client));
-
- idx = client.idx;
- DRM_LOCK();
- TAILQ_FOREACH(pt, &dev->files, link) {
- if (i == idx) {
- client.auth = pt->authenticated;
- client.pid = pt->pid;
- client.uid = pt->uid;
- client.magic = pt->magic;
- client.iocs = pt->ioctl_count;
- DRM_UNLOCK();
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) ==
- DDI_MODEL_ILP32) {
- drm_client_32_t client32;
-
- client32.idx = client.idx;
- client32.auth = client.auth;
- client32.pid = (uint32_t)client.pid;
- client32.uid = (uint32_t)client.uid;
- client32.magic = (uint32_t)client.magic;
- client32.iocs = (uint32_t)client.iocs;
-
- DRM_COPYTO_WITH_RETURN((void *)data, &client32,
- sizeof (client32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data,
- &client, sizeof (client));
-
- return (0);
+ struct drm_client *client = data;
+ struct drm_file *pt;
+ int idx;
+ int i;
+
+ idx = client->idx;
+ i = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(pt, struct drm_file, &dev->filelist, lhead){
+ if (i++ >= idx) {
+ client->auth = pt->authenticated;
+ client->pid = pt->pid;
+ client->uid = pt->uid;
+ client->magic = pt->magic;
+ client->iocs = pt->ioctl_count;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
}
- i++;
}
- DRM_UNLOCK();
- return (EINVAL);
+ mutex_unlock(&dev->struct_mutex);
+
+ return -EINVAL;
}
-/*ARGSUSED*/
-int
-drm_getstats(DRM_IOCTL_ARGS)
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_getstats(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_stats_t stats;
- int i;
-
- bzero(&stats, sizeof (stats));
+ struct drm_stats *stats = data;
+ int i;
- DRM_LOCK();
+ (void) memset(stats, 0, sizeof(*stats));
for (i = 0; i < dev->counters; i++) {
- if (dev->types[i] == _DRM_STAT_LOCK) {
- stats.data[i].value
- = (dev->lock.hw_lock
- ? dev->lock.hw_lock->lock : 0);
- } else
- stats.data[i].value = atomic_read(&dev->counts[i]);
- stats.data[i].type = dev->types[i];
+ if (dev->types[i] == _DRM_STAT_LOCK)
+ stats->data[i].value =
+ (file->master->lock.hw_lock ? file->master->lock.hw_lock->lock : 0);
+ else
+ stats->data[i].value = atomic_read(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
}
- stats.count = dev->counters;
+ stats->count = dev->counters;
- DRM_UNLOCK();
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_stats_32_t stats32;
- stats32.count = (uint32_t)stats.count;
- for (i = 0; i < 15; i++) {
- stats32.data[i].value = stats.data[i].value;
- stats32.data[i].type = stats.data[i].type;
- }
- DRM_COPYTO_WITH_RETURN((void *)data, &stats32,
- sizeof (stats32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &stats, sizeof (stats));
-
- return (0);
+ return 0;
}
-#define DRM_IF_MAJOR 1
-#define DRM_IF_MINOR 2
-
-/*ARGSUSED*/
-int
-drm_setversion(DRM_IOCTL_ARGS)
+/**
+ * Get device/driver capabilities
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_getcap(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_set_version_t sv;
- drm_set_version_t retv;
- int if_version;
-
- DRM_COPYFROM_WITH_RETURN(&sv, (void *)data, sizeof (sv));
-
- retv.drm_di_major = DRM_IF_MAJOR;
- retv.drm_di_minor = DRM_IF_MINOR;
- retv.drm_dd_major = dev->driver->driver_major;
- retv.drm_dd_minor = dev->driver->driver_minor;
-
- DRM_COPYTO_WITH_RETURN((void *)data, &retv, sizeof (sv));
+ struct drm_get_cap *req = data;
+
+ req->value = 0;
+ switch (req->capability) {
+ case DRM_CAP_DUMB_BUFFER:
+ if (dev->driver->dumb_create)
+ req->value = 1;
+ break;
+ case DRM_CAP_VBLANK_HIGH_CRTC:
+ req->value = 1;
+ break;
+ case DRM_CAP_DUMB_PREFERRED_DEPTH:
+ req->value = dev->mode_config.preferred_depth;
+ break;
+ case DRM_CAP_DUMB_PREFER_SHADOW:
+ req->value = dev->mode_config.prefer_shadow;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
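
drm_getcap() is the kernel half of the capability probe: userspace fills in capability and reads back value. A short sketch, assuming libdrm's drm.h for the struct and ioctl number:

    struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };
    if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value != 0) {
        /* the driver implements dumb_create, so dumb buffers are available */
    }
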
- if (sv.drm_di_major != -1) {
- if (sv.drm_di_major != DRM_IF_MAJOR ||
- sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
- return (EINVAL);
- if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor);
+/**
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version
+ */
+/* LINTED */
+int drm_setversion(DRM_IOCTL_ARGS)
+{
+ struct drm_set_version *sv = data;
+ int if_version, retcode = 0;
+
+ if (sv->drm_di_major != -1) {
+ if (sv->drm_di_major != DRM_IF_MAJOR ||
+ sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+ retcode = -EINVAL;
+ goto done;
+ }
+ if_version = DRM_IF_VERSION(sv->drm_di_major,
+ sv->drm_di_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
- if (sv.drm_di_minor >= 1) {
+ if (sv->drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
*/
- (void) drm_set_busid(dev);
+ (void) drm_set_busid(dev, file);
}
}
- if (sv.drm_dd_major != -1) {
- if (sv.drm_dd_major != dev->driver->driver_major ||
- sv.drm_dd_minor < 0 ||
- sv.drm_dd_minor > dev->driver->driver_minor)
- return (EINVAL);
+ if (sv->drm_dd_major != -1) {
+ if (sv->drm_dd_major != dev->driver->major ||
+ sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+ dev->driver->minor) {
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ /* OSOL_drm: if (dev->driver->set_version)
+ dev->driver->set_version(dev, sv); */
}
- return (0);
-}
+done:
+ sv->drm_di_major = DRM_IF_MAJOR;
+ sv->drm_di_minor = DRM_IF_MINOR;
+ sv->drm_dd_major = dev->driver->major;
+ sv->drm_dd_minor = dev->driver->minor;
-/*ARGSUSED*/
-int
-drm_noop(DRM_IOCTL_ARGS)
-{
- DRM_DEBUG("drm_noop\n");
- return (0);
+ return retcode;
}
-/*ARGSUSED*/
-int
-drm_version(DRM_IOCTL_ARGS)
+/** No-op ioctl. */
+/* LINTED */
+int drm_noop(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_version_t version;
- size_t len;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_version_32_t version32;
-
- DRM_COPYFROM_WITH_RETURN(&version32,
- (void *)data, sizeof (drm_version_32_t));
- version.name_len = version32.name_len;
- version.name = (char *)(uintptr_t)version32.name;
- version.date_len = version32.date_len;
- version.date = (char *)(uintptr_t)version32.date;
- version.desc_len = version32.desc_len;
- version.desc = (char *)(uintptr_t)version32.desc;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&version, (void *)data,
- sizeof (version));
-
-#define DRM_COPY(name, value) \
- len = strlen(value); \
- if (len > name##_len) len = name##_len; \
- name##_len = strlen(value); \
- if (len && name) { \
- if (DRM_COPY_TO_USER(name, value, len)) \
- return (EFAULT); \
- }
-
- version.version_major = dev->driver->driver_major;
- version.version_minor = dev->driver->driver_minor;
- version.version_patchlevel = dev->driver->driver_patchlevel;
-
- DRM_COPY(version.name, dev->driver->driver_name);
- DRM_COPY(version.date, dev->driver->driver_date);
- DRM_COPY(version.desc, dev->driver->driver_desc);
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_version_32_t version32;
-
- version32.version_major = version.version_major;
- version32.version_minor = version.version_minor;
- version32.version_patchlevel = version.version_patchlevel;
- version32.name_len = (uint32_t)version.name_len;
- version32.name = (caddr32_t)(uintptr_t)version.name;
- version32.date_len = (uint32_t)version.date_len;
- version32.date = (caddr32_t)(uintptr_t)version.date;
- version32.desc_len = (uint32_t)version.desc_len;
- version32.desc = (caddr32_t)(uintptr_t)version.desc;
- DRM_COPYTO_WITH_RETURN((void *)data, &version32,
- sizeof (drm_version_32_t));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &version,
- sizeof (version));
-
- return (0);
+ DRM_DEBUG("\n");
+ return 0;
}
+
diff --git a/usr/src/uts/common/io/drm/drm_irq.c b/usr/src/uts/common/io/drm/drm_irq.c
index 3d3640a..6a875bd 100644
--- a/usr/src/uts/common/io/drm/drm_irq.c
+++ b/usr/src/uts/common/io/drm/drm_irq.c
@@ -1,10 +1,21 @@
/*
- * drm_irq.c -- IRQ IOCTL and function support
- * Created: Fri Oct 18 2003 by anholt@FreeBSD.org
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
+
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
/*
- * Copyright 2003 Eric Anholt
- * Copyright (c) 2009, Intel Corporation.
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -21,375 +32,1185 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
- * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <anholt@FreeBSD.org>
- *
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
#include "drm.h"
+#include "drmP.h"
#include "drm_io32.h"
-/*ARGSUSED*/
-int
-drm_irq_by_busid(DRM_IOCTL_ARGS)
+static irqreturn_t __irq_handler_wrap(DRM_IRQ_ARGS)
{
- DRM_DEVICE;
- drm_irq_busid_t irq;
+ drm_device_t *dev = (void *)arg;
+ int ret;
- DRM_COPYFROM_WITH_RETURN(&irq, (void *)data, sizeof (irq));
+ mutex_enter(&dev->irq_lock);
+ ret = dev->driver->irq_handler(arg);
+ mutex_exit(&dev->irq_lock);
- if ((irq.busnum >> 8) != dev->pci_domain ||
- (irq.busnum & 0xff) != dev->pci_bus ||
- irq.devnum != dev->pci_slot ||
- irq.funcnum != dev->pci_func)
- return (EINVAL);
+ return (ret);
+}
+
+/* LINTED */
+static irqreturn_t __irq_handler_wrap_msi(caddr_t arg1, caddr_t arg2)
+{
+ drm_device_t *dev = (void *)arg1;
+ int ret;
+
+ mutex_enter(&dev->irq_lock);
+ ret = dev->driver->irq_handler(arg1);
+ mutex_exit(&dev->irq_lock);
- irq.irq = dev->irq;
+ return (ret);
+}
- DRM_DEBUG("%d:%d:%d => IRQ %d\n",
- irq.busnum, irq.devnum, irq.funcnum, irq.irq);
+static int __install_irq_handler(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i, ret;
+
+ if (pdev->msi_handle) {
+ /* Call ddi_intr_add_handler() */
+ for (i = 0; i < pdev->msi_actual; i++) {
+ ret = ddi_intr_add_handler(pdev->msi_handle[i],
+ __irq_handler_wrap_msi, (caddr_t)dev, NULL);
+ if (ret != DDI_SUCCESS) {
+ DRM_DEBUG("ddi_intr_add_handler() failed");
+ return (ret);
+ }
+ }
- DRM_COPYTO_WITH_RETURN((void *)data, &irq, sizeof (irq));
+ if (pdev->msi_flag & DDI_INTR_FLAG_BLOCK) {
+ /* Call ddi_intr_block_enable() for MSI */
+ (void) ddi_intr_block_enable(pdev->msi_handle, pdev->msi_actual);
+ } else {
+ /* Call ddi_intr_enable() for MSI non block enable */
+ for (i = 0; i < pdev->msi_actual; i++)
+ (void) ddi_intr_enable(pdev->msi_handle[i]);
+ }
+ } else {
+ /* setup the interrupt handler */
+ if (ddi_add_intr(dev->devinfo, 0, &pdev->intr_block,
+ (ddi_idevice_cookie_t *)NULL, __irq_handler_wrap,
+ (caddr_t)dev) != DDI_SUCCESS) {
+ DRM_ERROR("ddi_add_intr failed");
+ return (DDI_FAILURE);
+ }
+ }
- return (0);
+ return (DDI_SUCCESS);
}
+static void __uninstall_irq_handler(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i;
+
+ ASSERT(dev->devinfo);
+
+ if (pdev->msi_handle) {
+ /* Disable all interrupts */
+ if (pdev->msi_flag & DDI_INTR_FLAG_BLOCK) {
+ /* Call ddi_intr_block_disable() */
+ (void) ddi_intr_block_disable(pdev->msi_handle, pdev->msi_actual);
+ } else {
+ for (i = 0; i < pdev->msi_actual; i++)
+ (void) ddi_intr_disable(pdev->msi_handle[i]);
+ }
+
+ /* Call ddi_intr_remove_handler() */
+ for (i = 0; i < pdev->msi_actual; i++){
+ (void) ddi_intr_remove_handler(pdev->msi_handle[i]);
+ }
+ } else {
+ ddi_remove_intr(dev->devinfo, 0, pdev->intr_block);
+ }
+}
-static irqreturn_t
-drm_irq_handler_wrap(DRM_IRQ_ARGS)
+int
+pci_enable_msi(struct pci_dev *pdev)
{
- drm_device_t *dev = (void *)arg;
- int ret;
+ struct drm_device *dev = pdev->dev;
+ dev_info_t *devinfo = dev->devinfo;
+ int count, avail, actual;
+ int types;
+ int i, ret;
+
+ /* Get supported interrupt types */
+ if (ddi_intr_get_supported_types(dev->devinfo, &types) != DDI_SUCCESS) {
+ DRM_DEBUG("ddi_intr_get_supported_types() failed");
+ return (DDI_FAILURE);
+ }
+ if (!(types & DDI_INTR_TYPE_MSI))
+ return (DDI_FAILURE);
- mutex_enter(&dev->irq_lock);
- ret = dev->driver->irq_handler(arg);
- mutex_exit(&dev->irq_lock);
+ /* Get number of interrupts */
+ ret = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
+ if ((ret != DDI_SUCCESS) || (count == 0)) {
+ DRM_DEBUG("ddi_intr_get_nintrs() failed, "
+ "ret: %d, count: %d", ret, count);
+ return (ret);
+ }
+
+ /* Get number of available interrupts */
+ ret = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
+ if ((ret != DDI_SUCCESS) || (avail == 0)) {
+ DRM_DEBUG("ddi_intr_get_navail() failed, "
+ "ret: %d, avail: %d", ret, avail);
+ return (ret);
+ }
+
+ if (avail < count) {
+		DRM_DEBUG("nintrs() returned %d, navail returned %d",
+ count, avail);
+ }
+
+ /* Allocate memory for MSI interrupts */
+ pdev->msi_size = count * sizeof (ddi_intr_handle_t);
+ pdev->msi_handle = kmem_alloc(pdev->msi_size, KM_SLEEP);
+
+ ret = ddi_intr_alloc(devinfo, pdev->msi_handle, DDI_INTR_TYPE_MSI, 0,
+ count, &actual, DDI_INTR_ALLOC_NORMAL);
+
+ if ((ret != DDI_SUCCESS) || (actual == 0)) {
+ DRM_DEBUG("ddi_intr_alloc() failed: %d", ret);
+ kmem_free(pdev->msi_handle, pdev->msi_size);
+ return (ret);
+ }
+ pdev->msi_actual = actual;
+
+ /*
+ * Get priority for first msi, assume remaining are all the same
+ */
+ ret = ddi_intr_get_pri(pdev->msi_handle[0], &pdev->msi_pri);
+ if (ret != DDI_SUCCESS) {
+ DRM_DEBUG("ddi_intr_get_pri() failed: %d", ret);
+ for(i = 0; i < actual; i++)
+ (void) ddi_intr_free(pdev->msi_handle[i]);
+ kmem_free(pdev->msi_handle, pdev->msi_size);
+ return (ret);
+ }
+
+ ret = ddi_intr_get_cap(pdev->msi_handle[0], &pdev->msi_flag);
+ if (ret != DDI_SUCCESS) {
+ DRM_DEBUG("ddi_intr_get_cap() failed: %d", ret);
+ for(i = 0; i < actual; i++)
+ (void) ddi_intr_free(pdev->msi_handle[i]);
+ kmem_free(pdev->msi_handle, pdev->msi_size);
+ return (ret);
+ }
return (ret);
}
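
pci_enable_msi() climbs the standard DDI interrupt discovery ladder: confirm MSI is a supported type, ask how many vectors exist and how many are available, allocate handles, then read the priority and capability flags from the first handle (assumed representative of the rest). Condensed, with error handling elided:

    int types, count, avail, actual;
    uint_t pri;
    ddi_intr_handle_t *h;

    (void) ddi_intr_get_supported_types(dip, &types);	/* DDI_INTR_TYPE_MSI must be set */
    (void) ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &count);
    (void) ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
    h = kmem_alloc(count * sizeof (ddi_intr_handle_t), KM_SLEEP);
    (void) ddi_intr_alloc(dip, h, DDI_INTR_TYPE_MSI, 0, count, &actual,
        DDI_INTR_ALLOC_NORMAL);
    (void) ddi_intr_get_pri(h[0], &pri);
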
+void
+pci_disable_msi(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i < pdev->msi_actual; i++)
+ (void) ddi_intr_free(pdev->msi_handle[i]);
+ kmem_free(pdev->msi_handle, pdev->msi_size);
+ pdev->msi_handle = NULL;
+}
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+ (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+ ((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance is attached to.
+ */
+/* LINTED */
+int drm_irq_by_busid(DRM_IOCTL_ARGS)
+{
+ struct drm_irq_busid *p = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+ if ((p->busnum >> 8) != dev->pdev->domain ||
+ (p->busnum & 0xff) != dev->pdev->bus ||
+ p->devnum != dev->pdev->slot || p->funcnum != dev->pdev->func)
+ return -EINVAL;
+
+ p->irq = dev->pdev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
+
+ return 0;
+}
+
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+ (void) memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], -1,
+ DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ u32 vblcount;
+ s64 diff_ns;
+ int vblrc;
+ struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ dev->driver->disable_vblank(dev, crtc);
+ dev->vblank_enabled[crtc] = 0;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+ * vblank timestamp, repeat until they are consistent.
+ *
+ * FIXME: There is still a race condition here and in
+ * drm_update_vblank_count() which can cause off-by-one
+ * reinitialization of software vblank counter. If gpu
+ * vblank counter doesn't increment exactly at the leading
+ * edge of a vblank interval, then we can lose 1 count if
+ * we happen to execute between start of vblank and the
+ * delayed gpu counter increment.
+ */
+ do {
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
+
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* If there is at least 1 msec difference between the last stored
+ * timestamp and tvblank, then we are currently executing our
+ * disable inside a new vblank interval, the tvblank timestamp
+ * corresponds to this new vblank interval and the irq handler
+ * for this vblank didn't run yet and won't run due to our disable.
+ * Therefore we need to do the job of drm_handle_vblank() and
+ * increment the vblank counter by one to account for this vblank.
+ *
+ * Skip this step if there isn't any high precision timestamp
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+ if ((vblrc > 0) && (abs(diff_ns) > 1000000)) {
+ atomic_inc(&dev->_vblank_count[crtc]);
+ }
+
+ /* Invalidate all timestamps while vblank irq's are off. */
+ clear_vblank_timestamps(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+}
+
static void vblank_disable_fn(void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
+ unsigned long irqflags;
int i;
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
- atomic_read(&dev->vblank_enabled[i]) == 1) {
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- atomic_set(&dev->vblank_enabled[i], 0);
- DRM_DEBUG("disable vblank");
+ dev->vblank_enabled[i]) {
+ DRM_DEBUG("disabling vblank on crtc %d\n", i);
+ vblank_disable_and_save(dev, i);
}
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
-void
-drm_vblank_cleanup(struct drm_device *dev)
+void drm_vblank_cleanup(struct drm_device *dev)
{
-
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
+ del_timer(&dev->vblank_disable_timer);
+ destroy_timer(&dev->vblank_disable_timer);
+
vblank_disable_fn((void *)dev);
- drm_free(dev->vbl_queues, sizeof (wait_queue_head_t) * dev->num_crtcs,
- DRM_MEM_DRIVER);
- drm_free(dev->vbl_sigs, sizeof (struct drm_vbl_sig) * dev->num_crtcs,
- DRM_MEM_DRIVER);
- drm_free(dev->_vblank_count, sizeof (atomic_t) *
- dev->num_crtcs, DRM_MEM_DRIVER);
- drm_free(dev->vblank_refcount, sizeof (atomic_t) *
- dev->num_crtcs, DRM_MEM_DRIVER);
- drm_free(dev->vblank_enabled, sizeof (int) *
- dev->num_crtcs, DRM_MEM_DRIVER);
- drm_free(dev->last_vblank, sizeof (u32) * dev->num_crtcs,
- DRM_MEM_DRIVER);
- drm_free(dev->vblank_inmodeset, sizeof (*dev->vblank_inmodeset) *
- dev->num_crtcs, DRM_MEM_DRIVER);
+ kfree(dev->vbl_queue, sizeof (wait_queue_head_t) * dev->num_crtcs);
+ kfree(dev->_vblank_count, sizeof (atomic_t) * dev->num_crtcs);
+ kfree(dev->vblank_refcount, sizeof (atomic_t) * dev->num_crtcs);
+ kfree(dev->vblank_enabled, sizeof (int) * dev->num_crtcs);
+ kfree(dev->last_vblank, sizeof (u32) * dev->num_crtcs);
+ kfree(dev->last_vblank_wait, sizeof (u32) * dev->num_crtcs);
+ kfree(dev->vblank_inmodeset, sizeof (*dev->vblank_inmodeset) * dev->num_crtcs);
+ kfree(dev->_vblank_time, sizeof (*dev->_vblank_time) * dev->num_crtcs * DRM_VBLANKTIME_RBSIZE);
+
dev->num_crtcs = 0;
+
+ mutex_destroy(&dev->vbl_lock);
}
-int
-drm_vblank_init(struct drm_device *dev, int num_crtcs)
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
- int i, ret = ENOMEM;
-
- atomic_set(&dev->vbl_signal_pending, 0);
- dev->num_crtcs = num_crtcs;
+ int i, ret = -ENOMEM;
+ init_timer(&dev->vblank_disable_timer);
+ setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+ dev);
+ mutex_init(&dev->vbl_lock, NULL, MUTEX_DRIVER, (void *)dev->pdev->intr_block);
+ spin_lock_init(&dev->vblank_time_lock);
- dev->vbl_queues = drm_alloc(sizeof (wait_queue_head_t) * num_crtcs,
- DRM_MEM_DRIVER);
- if (!dev->vbl_queues)
- goto err;
+ dev->num_crtcs = num_crtcs;
- dev->vbl_sigs = drm_alloc(sizeof (struct drm_vbl_sig) * num_crtcs,
- DRM_MEM_DRIVER);
- if (!dev->vbl_sigs)
+ dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
+ GFP_KERNEL);
+ if (!dev->vbl_queue)
goto err;
- dev->_vblank_count = drm_alloc(sizeof (atomic_t) * num_crtcs,
- DRM_MEM_DRIVER);
+ dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
if (!dev->_vblank_count)
goto err;
- dev->vblank_refcount = drm_alloc(sizeof (atomic_t) * num_crtcs,
- DRM_MEM_DRIVER);
+ dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
+ GFP_KERNEL);
if (!dev->vblank_refcount)
goto err;
- dev->vblank_enabled = drm_alloc(num_crtcs * sizeof (int),
- DRM_MEM_DRIVER);
+ dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
if (!dev->vblank_enabled)
goto err;
- dev->last_vblank = drm_alloc(num_crtcs * sizeof (u32), DRM_MEM_DRIVER);
+ dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
if (!dev->last_vblank)
goto err;
- dev->vblank_inmodeset = drm_alloc(num_crtcs * sizeof (int),
- DRM_MEM_DRIVER);
+ dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+ if (!dev->last_vblank_wait)
+ goto err;
+
+ dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
if (!dev->vblank_inmodeset)
goto err;
+ dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+ sizeof(struct timeval), GFP_KERNEL);
+ if (!dev->_vblank_time)
+ goto err;
+
+ DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+ /* Driver specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_INFO("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_INFO("No driver support for vblank timestamp query.\n");
+
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
- DRM_INIT_WAITQUEUE(&dev->vbl_queues[i], DRM_INTR_PRI(dev));
- TAILQ_INIT(&dev->vbl_sigs[i]);
+ DRM_INIT_WAITQUEUE(&dev->vbl_queue[i], DRM_INTR_PRI(dev));
atomic_set(&dev->_vblank_count[i], 0);
atomic_set(&dev->vblank_refcount[i], 0);
}
- dev->vblank_disable_allowed = 1;
- return (0);
+ dev->vblank_disable_allowed = 0;
+
+ return 0;
err:
- DRM_ERROR("drm_vblank_init: alloc error");
drm_vblank_cleanup(dev);
- return (ret);
+ return ret;
}
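+
+/*
+ * Usage sketch (illustrative): a driver typically calls drm_vblank_init()
+ * once from its load/attach hook and relies on drm_vblank_cleanup() on
+ * unload; mydrv_load() and the pipe count are hypothetical.
+ *
+ *     static int mydrv_load(struct drm_device *dev, unsigned long flags)
+ *     {
+ *             int ret;
+ *
+ *             ret = drm_vblank_init(dev, 2);      (two display pipes)
+ *             if (ret)
+ *                     return ret;
+ *             ...
+ *     }
+ */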
-/*ARGSUSED*/
-static int
-drm_install_irq_handle(drm_device_t *dev)
+/* LINTED */
+static void drm_irq_vgaarb_nokms(void *cookie, bool state)
{
- dev_info_t *dip = dev->dip;
+ struct drm_device *dev = cookie;
- if (dip == NULL) {
- DRM_ERROR("drm_install_irq_handle: cannot get vgatext's dip");
- return (DDI_FAILURE);
- }
-
- if (ddi_intr_hilevel(dip, 0) != 0) {
- DRM_ERROR("drm_install_irq_handle: "
- "high-level interrupts are not supported");
- return (DDI_FAILURE);
+ if (dev->driver->vgaarb_irq) {
+ dev->driver->vgaarb_irq(dev, state);
+ return;
}
- if (ddi_get_iblock_cookie(dip, (uint_t)0,
- &dev->intr_block) != DDI_SUCCESS) {
- DRM_ERROR("drm_install_irq_handle: cannot get iblock cookie");
- return (DDI_FAILURE);
- }
+ if (!dev->irq_enabled)
+ return;
- /* setup the interrupt handler */
- if (ddi_add_intr(dip, 0, &dev->intr_block,
- (ddi_idevice_cookie_t *)NULL, drm_irq_handler_wrap,
- (caddr_t)dev) != DDI_SUCCESS) {
- DRM_ERROR("drm_install_irq_handle: ddi_add_intr failed");
- return (DDI_FAILURE);
+ if (state) {
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+ } else {
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
}
-
- return (DDI_SUCCESS);
}
-/*ARGSUSED*/
-int
-drm_irq_install(drm_device_t *dev)
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
+ * before and after the installation.
+ */
+int drm_irq_install(struct drm_device *dev)
{
int ret;
- if (dev->dev_private == NULL) {
- DRM_ERROR("drm_irq_install: dev_private is NULL");
- return (EINVAL);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+ if (dev->pdev->irq == 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Driver must have been initialized */
+ if (!dev->dev_private) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
if (dev->irq_enabled) {
- DRM_ERROR("drm_irq_install: irq already enabled");
- return (EBUSY);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
}
+ dev->irq_enabled = 1;
+ mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("drm_irq_install irq=%d\n", dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
- /* before installing handler */
- ret = dev->driver->irq_preinstall(dev);
- if (ret)
- return (EINVAL);
+ /* Before installing handler */
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
- /* install handler */
- ret = drm_install_irq_handle(dev);
+ /* Install handler */
+ ret = __install_irq_handler(dev);
if (ret != DDI_SUCCESS) {
- DRM_ERROR("drm_irq_install: drm_install_irq_handle failed");
- return (ret);
+ DRM_ERROR("IRQ handler installation failed");
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return -EFAULT;
}
- /* after installing handler */
- dev->driver->irq_postinstall(dev);
+ /* After installing handler */
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
- dev->irq_enabled = 1;
dev->context_flag = 0;
-
- return (0);
+ return 0;
}
-static void
-drm_uninstall_irq_handle(drm_device_t *dev)
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
+ */
+int drm_irq_uninstall(struct drm_device * dev)
{
- ASSERT(dev->dip);
- ddi_remove_intr(dev->dip, 0, dev->intr_block);
-}
+ unsigned long irqflags;
+ int irq_enabled, i;
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
-/*ARGSUSED*/
-int
-drm_irq_uninstall(drm_device_t *dev)
-{
- int i;
- if (!dev->irq_enabled) {
- return (EINVAL);
- }
+ mutex_lock(&dev->struct_mutex);
+ irq_enabled = dev->irq_enabled;
dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
/*
* Wake up any waiters so they don't hang.
*/
- DRM_SPINLOCK(&dev->vbl_lock);
- for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queues[i]);
- dev->vblank_enabled[i] = 0;
+ if (dev->num_crtcs) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ DRM_WAKEUP(&dev->vbl_queue[i]);
+ dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
- DRM_SPINUNLOCK(&dev->vbl_lock);
- dev->driver->irq_uninstall(dev);
- drm_uninstall_irq_handle(dev);
- dev->locked_tasklet_func = NULL;
+ if (!irq_enabled)
+ return -EINVAL;
- return (DDI_SUCCESS);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+
+ __uninstall_irq_handler(dev);
+
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_control(DRM_IOCTL_ARGS)
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+/* LINTED */
+int drm_control(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_control_t ctl;
- int err;
+ struct drm_control *ctl = data;
- DRM_COPYFROM_WITH_RETURN(&ctl, (void *)data, sizeof (ctl));
+ /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
- switch (ctl.func) {
+
+ switch (ctl->func) {
case DRM_INST_HANDLER:
- /*
- * Handle drivers whose DRM used to require IRQ setup but the
- * no longer does.
- */
- return (drm_irq_install(dev));
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+ if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+ ctl->irq != dev->pdev->irq)
+ return -EINVAL;
+ return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
- err = drm_irq_uninstall(dev);
- return (err);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+ return drm_irq_uninstall(dev);
default:
- return (EINVAL);
+ return -EINVAL;
}
}
-u32
-drm_vblank_count(struct drm_device *dev, int crtc)
+/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
{
- return (atomic_read(&dev->_vblank_count[crtc]));
+ s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ u64 dotclock;
+
+ /* Dot clock in Hz: */
+ dotclock = (u64) crtc->hwmode.clock * 1000;
+
+ /* Fields of interlaced scanout modes are only half a frame duration.
+ * Double the dotclock to get half the frame-/line-/pixelduration.
+ */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ int frame_size;
+ /* Convert scanline length in pixels and video dot clock to
+ * line duration, frame duration and pixel duration in
+ * nanoseconds:
+ */
+ pixeldur_ns = (s64) div_u64(1000000000, dotclock);
+ linedur_ns = (s64) div_u64(((u64) crtc->hwmode.crtc_htotal *
+ 1000000000), dotclock);
+ frame_size = crtc->hwmode.crtc_htotal *
+ crtc->hwmode.crtc_vtotal;
+ framedur_ns = (s64) div_u64((u64) frame_size * 1000000000,
+ dotclock);
+ } else {
+ DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+ }
+
+ crtc->pixeldur_ns = pixeldur_ns;
+ crtc->linedur_ns = linedur_ns;
+ crtc->framedur_ns = framedur_ns;
+
+ DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, crtc->hwmode.crtc_htotal,
+ crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+ (int) linedur_ns, (int) pixeldur_ns);
}
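+
+/*
+ * Worked example (illustrative): a 1920x1080@60 mode with hwmode.clock
+ * 148500 (kHz), crtc_htotal 2200 and crtc_vtotal 1125 gives dotclock
+ * 148500000 Hz, so pixeldur_ns = 6, linedur_ns = 14814 and
+ * framedur_ns = 16666666, i.e. one 60 Hz frame.
+ */
+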
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as no operation if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ * On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL - Invalid crtc.
+ * -EAGAIN - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc)
+{
+ struct timeval stime, raw_time;
+ struct drm_display_mode *mode;
+ int vbl_status, vtotal, vdisplay;
+ int vpos, hpos, i;
+ s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ bool invbl;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return -EIO;
+ }
+
+ mode = &refcrtc->hwmode;
+ vtotal = mode->crtc_vtotal;
+ vdisplay = mode->crtc_vdisplay;
+
+ /* Durations of frames, lines, pixels in nanoseconds. */
+ framedur_ns = refcrtc->framedur_ns;
+ linedur_ns = refcrtc->linedur_ns;
+ pixeldur_ns = refcrtc->pixeldur_ns;
+
+ /* If mode timing undefined, just return as no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ return -EAGAIN;
+ }
+
+ /* Get current scanout position with system timestamp.
+ * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+ * if single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /* Disable preemption to make it very likely to
+ * succeed in the first iteration even on PREEMPT_RT kernel.
+ */
+
+ /* Get system timestamp before query. */
+ do_gettimeofday(&stime);
+
+ /* Get vertical and horizontal scanout pos. vpos, hpos. */
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+ /* Get system timestamp after query. */
+ do_gettimeofday(&raw_time);
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+ crtc, vbl_status);
+ return -EIO;
+ }
+
+ duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= (s64) *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+ crtc, (int) duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = (int) duration_ns;
+
+ /* Check if in vblank area:
+ * vpos is >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+ invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+ * can be negative if start of scanout hasn't happened yet.
+ */
+ delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+ ((vdisplay - vpos) < vtotal / 100)) {
+ delta_ns = delta_ns - framedur_ns;
+
+ /* Signal this correction as "applied". */
+ vbl_status |= 0x8;
+ }
+
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns, vblank_time);
+
+ DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+ raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+ (int) duration_ns/1000, i);
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+ vbl_status |= DRM_VBLANKTIME_INVBL;
+
+ return vbl_status;
+}
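+
+/*
+ * Usage sketch (illustrative): a kms driver can route its
+ * get_vblank_timestamp() hook through this helper, as described above;
+ * mydrv_crtc_for_pipe() is hypothetical glue mapping a pipe index to
+ * its drm_crtc.
+ *
+ *     static int mydrv_get_vblank_timestamp(struct drm_device *dev,
+ *         int crtc, int *max_error, struct timeval *vblank_time,
+ *         unsigned flags)
+ *     {
+ *             struct drm_crtc *ref = mydrv_crtc_for_pipe(dev, crtc);
+ *
+ *             return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc,
+ *                 max_error, vblank_time, flags, ref);
+ *     }
+ */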
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
+{
+ int ret = 0;
+
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
+
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+ return (u32) ret;
+ }
+
+ /* GPU high precision timestamp query unsupported or failed.
+ * Return gettimeofday timestamp as best estimate.
+ */
+ do_gettimeofday(tvblank);
+
+ return 0;
+}
+
+/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+ return atomic_read(&dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current vblank counter
+ * value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+{
+ u32 cur_vblank;
+
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+ cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+ return cur_vblank;
+}
+
+/* LINTED */
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ e->event.sequence = (u32) seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list, (caddr_t)&e->base);
+ DRM_WAKEUP(&e->base.file_priv->event_wait);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+ unsigned int seq;
+ if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ do_gettimeofday(&now);
+ }
+ e->pipe = crtc;
+ send_vblank_event(dev, e, seq, &now);
+}
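+
+/*
+ * Usage sketch (illustrative): completing a page flip from a driver,
+ * honoring the "caller must hold event lock" rule above; flip_pending
+ * is hypothetical driver state.
+ *
+ *     spin_lock_irqsave(&dev->event_lock, flags);
+ *     if (flip_pending != NULL) {
+ *             drm_send_vblank_event(dev, pipe, flip_pending);
+ *             flip_pending = NULL;
+ *     }
+ *     spin_unlock_irqrestore(&dev->event_lock, flags);
+ */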
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc). Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
- u32 cur_vblank, diff;
+ u32 cur_vblank, diff, tslot, rc;
+ struct timeval t_vblank;
+
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
+ *
+ * We repeat the hardware vblank counter & timestamp query until
+ * we get consistent results. This to prevent races between gpu
+ * updating its hardware counter while we are retrieving the
+ * corresponding vblank timestamp.
*/
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
diff += dev->max_vblank_count;
- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->last_vblank[crtc], cur_vblank, diff);
+
+ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+ crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ }
+
+ DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+ crtc, diff);
+
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * reinitialize delayed at next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
atomic_add(diff, &dev->_vblank_count[crtc]);
}
-static timeout_id_t timer_id = NULL;
-
-int
-drm_vblank_get(struct drm_device *dev, int crtc)
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
{
+ unsigned long irqflags, irqflags2;
int ret = 0;
- DRM_SPINLOCK(&dev->vbl_lock);
-
- if (timer_id != NULL) {
- (void) untimeout(timer_id);
- timer_id = NULL;
- }
-
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- atomic_add(1, &dev->vblank_refcount[crtc]);
- if (dev->vblank_refcount[crtc] == 1 &&
- atomic_read(&dev->vblank_enabled[crtc]) == 0) {
- ret = dev->driver->enable_vblank(dev, crtc);
- if (ret)
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
+ if (!dev->vblank_enabled[crtc]) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+ crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else {
+ dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- atomic_set(&dev->vblank_enabled[crtc], 1);
- drm_update_vblank_count(dev, crtc);
+ ret = -EINVAL;
}
}
- DRM_SPINUNLOCK(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- return (ret);
+ return ret;
}
-void
-drm_vblank_put(struct drm_device *dev, int crtc)
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
{
- DRM_SPINLOCK(&dev->vbl_lock);
+ BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+
/* Last user schedules interrupt disable */
- atomic_dec(&dev->vblank_refcount[crtc]);
+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+ (drm_vblank_offdelay > 0))
+ mod_timer(&dev->vblank_disable_timer,
+ ((drm_vblank_offdelay * DRM_HZ)/1000));
+}
- if (dev->vblank_refcount[crtc] == 0)
- timer_id = timeout(vblank_disable_fn, (void *) dev, 5*DRM_HZ);
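+
+/*
+ * Usage sketch (illustrative): hold the reference across any window in
+ * which vblank interrupts must stay enabled; the put side only arms the
+ * disable timer, it never turns interrupts off synchronously.
+ *
+ *     if (drm_vblank_get(dev, pipe) == 0) {
+ *             ... wait on dev->vbl_queue[pipe] or sample counters ...
+ *             drm_vblank_put(dev, pipe);
+ *     }
+ */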
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned int seq;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ vblank_disable_and_save(dev, crtc);
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+
+ /* Send any queued vblank events, lest the natives grow disquiet */
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
+ list_for_each_entry_safe(e, t, struct drm_pending_vblank_event,
+ &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ DRM_DEBUG("Sending premature vblank event on disable: \
+ wanted %d, current %d\n",
+ e->event.sequence, seq);
+ list_del(&e->base.link);
+ drm_vblank_put(dev, e->pipe);
+ send_vblank_event(dev, e, seq, &now);
+ }
+ spin_unlock(&dev->event_lock);
- DRM_SPINUNLOCK(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-/*
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+ /* vblank is not initialized (IRQ not installed ?) */
+ if (!dev->num_crtcs)
+ return;
+ /*
+ * To avoid all the problems that might happen if interrupts
+ * were enabled/disabled around or between these calls, we just
+ * have the kernel take a reference on the CRTC (just once though
+ * to avoid corrupting the count if multiple, mismatch calls occur),
+ * so that interrupts remain enabled in the interim.
+ */
+ if (!dev->vblank_inmodeset[crtc]) {
+ dev->vblank_inmodeset[crtc] = 0x1;
+ if (drm_vblank_get(dev, crtc) == 0)
+ dev->vblank_inmodeset[crtc] |= 0x2;
+ }
+}
+
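+/**
+ * drm_vblank_post_modeset - undo drm_vblank_pre_modeset accounting
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Drops the reference taken by drm_vblank_pre_modeset(), if any, and
+ * re-allows vblank interrupt disabling once the mode set is complete.
+ */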
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
+ if (dev->vblank_inmodeset[crtc]) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->vblank_disable_allowed = 1;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ if (dev->vblank_inmodeset[crtc] & 0x2)
+ drm_vblank_put(dev, crtc);
+
+ dev->vblank_inmodeset[crtc] = 0;
+ }
+}
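+
+/*
+ * Usage sketch (illustrative): a non-KMS driver brackets a mode switch
+ * so the cooked counter survives a hardware frame-counter reset.
+ *
+ *     drm_vblank_pre_modeset(dev, crtc);
+ *     ... program the new mode; the hardware counter may reset ...
+ *     drm_vblank_post_modeset(dev, crtc);
+ */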
+
+/**
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
@@ -400,182 +1221,305 @@ drm_vblank_put(struct drm_device *dev, int crtc)
* enabled around this call, we don't have to do anything since the counter
* will have already been incremented.
*/
-/*ARGSUSED*/
-int
-drm_modeset_ctl(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_modeset_ctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- struct drm_modeset_ctl modeset;
- int crtc, ret = 0;
+ struct drm_modeset_ctl *modeset = data;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
- goto out;
+ return 0;
- DRM_COPYFROM_WITH_RETURN(&modeset, (void *)data,
- sizeof (modeset));
+ /* KMS drivers handle this internally */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
- crtc = modeset.crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
+ crtc = modeset->crtc;
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatch calls occur),
- * so that interrupts remain enabled in the interim.
- */
- switch (modeset.cmd) {
+ switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 1;
- ret = drm_vblank_get(dev, crtc);
- }
+ drm_vblank_pre_modeset(dev, crtc);
break;
case _DRM_POST_MODESET:
- if (dev->vblank_inmodeset[crtc]) {
- DRM_SPINLOCK(&dev->vbl_lock);
- dev->vblank_disable_allowed = 1;
- dev->vblank_inmodeset[crtc] = 0;
- DRM_SPINUNLOCK(&dev->vbl_lock);
- drm_vblank_put(dev, crtc);
- }
+ drm_vblank_post_modeset(dev, crtc);
break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out:
- return (ret);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_wait_vblank(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_wait_vblank_t vblwait;
- int ret, flags, crtc;
- unsigned int sequence;
-
- if (!dev->irq_enabled) {
- DRM_ERROR("wait vblank, EINVAL");
- return (EINVAL);
- }
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_wait_vblank_32_t vblwait32;
- DRM_COPYFROM_WITH_RETURN(&vblwait32, (void *)data,
- sizeof (vblwait32));
- vblwait.request.type = vblwait32.request.type;
- vblwait.request.sequence = vblwait32.request.sequence;
- vblwait.request.signal = vblwait32.request.signal;
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
+{
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+ int ret;
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+
+ e->pipe = pipe;
+ e->base.pid = ddi_get_pid();
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = vblwait->request.signal;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (void *, size_t)) kfree;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (file_priv->event_space < sizeof e->event) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ file_priv->event_space -= sizeof e->event;
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
+ if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+ vblwait->request.sequence, seq, pipe);
+
+ e->event.sequence = vblwait->request.sequence;
+ if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ drm_vblank_put(dev, pipe);
+ send_vblank_event(dev, e, seq, &now);
+ pollwakeup(&e->base.file_priv->drm_pollhead, POLLIN | POLLRDNORM);
+ vblwait->reply.sequence = seq;
} else {
-#endif
- DRM_COPYFROM_WITH_RETURN(&vblwait, (void *)data,
- sizeof (vblwait));
-#ifdef _MULTI_DATAMODEL
+ /* drm_handle_vblank_events will call drm_vblank_put */
+ list_add_tail(&e->base.link, &dev->vblank_event_list, (caddr_t)&e->base);
+ vblwait->reply.sequence = vblwait->request.sequence;
}
-#endif
- if (vblwait.request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
- DRM_ERROR("drm_wait_vblank: wrong request type 0x%x",
- vblwait.request.type);
- return (EINVAL);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return 0;
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e, sizeof(*e));
+err_put:
+ drm_vblank_put(dev, pipe);
+ return ret;
+}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
+/* LINTED */
+int drm_wait_vblank(DRM_IOCTL_ARGS)
+{
+ union drm_wait_vblank *vblwait = data;
+ int ret = 0;
+ unsigned int flags, seq, crtc, high_crtc;
+
+ if ((!dev->pdev->irq) || (!dev->irq_enabled))
+ return -EINVAL;
+
+ if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+ return -EINVAL;
+
+ if (vblwait->request.type &
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK)) {
+ DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+ vblwait->request.type,
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
+ return -EINVAL;
}
- flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+ high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_crtc)
+ crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ else
crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
- if (crtc >= dev->num_crtcs) {
- DRM_ERROR("wait vblank operation not support");
- return (ENOTSUP);
- }
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
+
ret = drm_vblank_get(dev, crtc);
if (ret) {
- DRM_ERROR("can't get drm vblank %d", ret);
- return (ret);
+ DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+ return ret;
}
- sequence = drm_vblank_count(dev, crtc);
+ seq = drm_vblank_count(dev, crtc);
- switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
+ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
- vblwait.request.sequence += sequence;
- vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
- /*FALLTHROUGH*/
+ vblwait->request.sequence += seq;
+ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ /* LINTED */
case _DRM_VBLANK_ABSOLUTE:
break;
default:
- DRM_DEBUG("wait vblank return EINVAL");
- return (EINVAL);
+ ret = -EINVAL;
+ goto done;
}
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (sequence - vblwait.request.sequence) <= (1<<23)) {
- vblwait.request.sequence = sequence + 1;
+ if (flags & _DRM_VBLANK_EVENT) {
+ /* Must hold on to the vblank ref until the event fires;
+ * drm_vblank_put() will be called asynchronously.
+ */
+ return drm_queue_vblank_event(dev, crtc, vblwait, file);
}
- if (flags & _DRM_VBLANK_SIGNAL) {
- /*
- * Don't block process, send signal when vblank interrupt
- */
- DRM_ERROR("NOT SUPPORT YET, SHOULD BE ADDED");
- cmn_err(CE_WARN, "NOT SUPPORT YET, SHOULD BE ADDED");
- ret = EINVAL;
- goto done;
- } else {
- /* block until vblank interupt */
- /* shared code returns -errno */
- DRM_WAIT_ON(ret, &dev->vbl_queues[crtc], 3 * DRM_HZ,
- (((drm_vblank_count(dev, crtc)
- - vblwait.request.sequence) <= (1 << 23)) ||
- !dev->irq_enabled));
- if (ret != EINTR) {
- struct timeval now;
- (void) uniqtime(&now);
- vblwait.reply.tval_sec = now.tv_sec;
- vblwait.reply.tval_usec = now.tv_usec;
- vblwait.reply.sequence = drm_vblank_count(dev, crtc);
- }
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1<<23)) {
+ vblwait->request.sequence = seq + 1;
}
-done:
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_wait_vblank_32_t vblwait32;
- vblwait32.reply.type = vblwait.reply.type;
- vblwait32.reply.sequence = vblwait.reply.sequence;
- vblwait32.reply.tval_sec = (int32_t)vblwait.reply.tval_sec;
- vblwait32.reply.tval_usec = (int32_t)vblwait.reply.tval_usec;
- DRM_COPYTO_WITH_RETURN((void *)data, &vblwait32,
- sizeof (vblwait32));
+ DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+ vblwait->request.sequence, crtc);
+ dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+ DRM_WAIT_ON(ret, &dev->vbl_queue[crtc], 3 * DRM_HZ,
+ (((drm_vblank_count(dev, crtc) -
+ vblwait->request.sequence) <= (1 << 23)) ||
+ !dev->irq_enabled));
+
+ if (ret != -EINTR) {
+ struct timeval now;
+
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+
+ vblwait->reply.tval_sec = now.tv_sec;
+ vblwait->reply.tval_usec = now.tv_usec;
+ DRM_DEBUG("returning %d to client\n",
+ vblwait->reply.sequence);
} else {
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &vblwait,
- sizeof (vblwait));
-#ifdef _MULTI_DATAMODEL
+ DRM_DEBUG("vblank wait interrupted by signal\n");
}
-#endif
+done:
drm_vblank_put(dev, crtc);
- return (ret);
+ return ret;
}
-
-/*ARGSUSED*/
-void
-drm_vbl_send_signals(drm_device_t *dev)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
- DRM_DEBUG("drm_vbl_send_signals");
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, struct drm_pending_vblank_event,
+ &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ if ((seq - e->event.sequence) > (1<<23))
+ continue;
+
+ DRM_DEBUG("vblank event on %d, current %d\n",
+ e->event.sequence, seq);
+
+ list_del(&e->base.link);
+ drm_vblank_put(dev, e->pipe);
+ send_vblank_event(dev, e, seq, &now);
+ pollwakeup(&e->base.file_priv->drm_pollhead, POLLIN | POLLRDNORM);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-void
-drm_handle_vblank(struct drm_device *dev, int crtc)
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
- atomic_inc(&dev->_vblank_count[crtc]);
- DRM_WAKEUP(&dev->vbl_queues[crtc]);
+ u32 vblcount;
+ s64 diff_ns;
+ struct timeval tvblank;
+ unsigned long irqflags;
+
+ if (!dev->num_crtcs)
+ return false;
+
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!dev->vblank_enabled[crtc]) {
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return false;
+ }
+
+ /* Fetch corresponding timestamp for this vblank interval from
+ * driver and store it in proper slot of timestamp ringbuffer.
+ */
+
+ /* Get current timestamp and count. */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ (void) drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* Update vblank timestamp and count if at least
+ * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+ * difference between last stored timestamp and current
+ * timestamp. A smaller difference means basically
+ * identical timestamps. Happens if this vblank has
+ * been already processed and this is a redundant call,
+ * e.g., due to spurious vblank interrupts. We need to
+ * ignore those for accounting.
+ */
+ if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+ atomic_inc(&dev->_vblank_count[crtc]);
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
+
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ drm_handle_vblank_events(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return true;
}
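+
+/*
+ * Usage sketch (illustrative): per the comment above, a driver's
+ * interrupt service routine calls drm_handle_vblank() for each pipe
+ * whose vblank status bit fired; the status bits are hypothetical.
+ *
+ *     if (status & MYDRV_PIPE_A_VBLANK)
+ *             (void) drm_handle_vblank(dev, 0);
+ *     if (status & MYDRV_PIPE_B_VBLANK)
+ *             (void) drm_handle_vblank(dev, 1);
+ */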
diff --git a/usr/src/uts/common/io/drm/drm_kstat.c b/usr/src/uts/common/io/drm/drm_kstat.c
index 23e51fe..c286780 100644
--- a/usr/src/uts/common/io/drm/drm_kstat.c
+++ b/usr/src/uts/common/io/drm/drm_kstat.c
@@ -1,30 +1,30 @@
/*
- * CDDL HEADER START
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
+
/*
- * Copyright 2008 Sun Microsystems, Inc.
- * All rights reserved. Use is subject to license terms.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
#include <sys/kstat.h>
#include <sys/ddi.h>
@@ -43,7 +43,7 @@ static char *drmkstat_name[] = {
static int
drm_kstat_update(kstat_t *ksp, int flag)
{
- drm_device_t *sc;
+ struct drm_device *sc;
kstat_named_t *knp;
int tmp;
@@ -61,7 +61,7 @@ drm_kstat_update(kstat_t *ksp, int flag)
}
int
-drm_init_kstats(drm_device_t *sc)
+drm_init_kstats(struct drm_device *sc)
{
int instance;
kstat_t *ksp;
@@ -69,7 +69,7 @@ drm_init_kstats(drm_device_t *sc)
char *np;
char **aknp;
- instance = ddi_get_instance(sc->dip);
+ instance = ddi_get_instance(sc->devinfo);
aknp = drmkstat_name;
ksp = kstat_create("drm", instance, "drminfo", "drm",
KSTAT_TYPE_NAMED, sizeof (drmkstat_name)/sizeof (char *) - 1,
@@ -90,7 +90,7 @@ drm_init_kstats(drm_device_t *sc)
}
void
-drm_fini_kstats(drm_device_t *sc)
+drm_fini_kstats(struct drm_device *sc)
{
if (sc->asoft_ksp)
kstat_delete(sc->asoft_ksp);
diff --git a/usr/src/uts/common/io/drm/drm_linux.c b/usr/src/uts/common/io/drm/drm_linux.c
new file mode 100644
index 0000000..4f1bc6d
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_linux.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drm_linux.h"
+
+void
+kref_init(struct kref *kref)
+{
+ atomic_set(&kref->refcount, 1);
+}
+
+void
+kref_get(struct kref *kref)
+{
+ atomic_inc(&kref->refcount);
+}
+
+void
+kref_put(struct kref *kref, void (*release)(struct kref *kref))
+{
+ if (!atomic_dec_uint_nv(&kref->refcount))
+ release(kref);
+}
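+
+/*
+ * Usage sketch (illustrative): the embedded-refcount pattern these
+ * helpers support; struct foo and foo_release() are hypothetical, and
+ * container_of() is assumed from the Linux compatibility headers.
+ *
+ *     struct foo {
+ *             struct kref ref;
+ *             ...
+ *     };
+ *
+ *     static void foo_release(struct kref *kref)
+ *     {
+ *             struct foo *f = container_of(kref, struct foo, ref);
+ *             kfree(f, sizeof (*f));
+ *     }
+ *
+ *     kref_init(&f->ref);                  (refcount = 1)
+ *     kref_get(&f->ref);                   (refcount = 2)
+ *     kref_put(&f->ref, foo_release);      (drops to 1; frees at 0)
+ */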
+
+unsigned int
+hweight16(unsigned int w)
+{
+ w = (w & 0x5555) + ((w >> 1) & 0x5555);
+ w = (w & 0x3333) + ((w >> 2) & 0x3333);
+ w = (w & 0x0F0F) + ((w >> 4) & 0x0F0F);
+ w = (w & 0x00FF) + ((w >> 8) & 0x00FF);
+ return (w);
+}
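+
+/*
+ * Example (illustrative): each step above folds adjacent bit-count
+ * fields, so 16 one-bit counts become 8 two-bit sums, then 4 four-bit
+ * sums, and so on; e.g. hweight16(0xF0F0) returns 8.
+ */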
+
+long
+IS_ERR(const void *ptr)
+{
+ return ((unsigned long)ptr >= (unsigned long)-255);
+}
+
+#ifdef NEVER
+/*
+ * kfree wrapper to Solaris VM.
+ */
+void
+kfree(void *buf, size_t size) {
+}
+#endif
diff --git a/usr/src/uts/common/io/drm/drm_linux_list.h b/usr/src/uts/common/io/drm/drm_linux_list.h
deleted file mode 100644
index 02a4809..0000000
--- a/usr/src/uts/common/io/drm/drm_linux_list.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * drm_linux_list.h -- linux list functions for the BSDs.
- * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
- */
-/*
- * -
- * Copyright 2003 Eric Anholt
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <anholt@FreeBSD.org>
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _DRM_LINUX_LIST_H_
-#define _DRM_LINUX_LIST_H_
-
-struct list_head {
- struct list_head *next, *prev;
- caddr_t contain_ptr;
-};
-
-/* Cheat, assume the list_head is at the start of the struct */
-#define list_entry(entry, type, member) (type *)(uintptr_t)(entry->contain_ptr)
-
-#define INIT_LIST_HEAD(head) { \
- (head)->next = head; \
- (head)->prev = head; \
- (head)->contain_ptr = (caddr_t)head; \
-}
-
-#define list_add(entry, head, con_ptr) { \
- (head)->next->prev = entry; \
- (entry)->next = (head)->next; \
- (entry)->prev = head; \
- (head)->next = entry; \
- (entry)->contain_ptr = con_ptr; \
-}
-
-#define list_add_tail(entry, head, con_ptr) { \
- (entry)->prev = (head)->prev; \
- (entry)->next = head; \
- (head)->prev->next = entry; \
- (head)->prev = entry; \
- (entry)->contain_ptr = con_ptr; \
-}
-
-#define list_del(entry) { \
- (entry)->next->prev = (entry)->prev; \
- (entry)->prev->next = (entry)->next; \
- (entry)->contain_ptr = NULL; \
-}
-
-#define list_for_each(entry, head) \
- for (entry = (head)->next; entry != head; entry = (entry)->next)
-
-#define list_for_each_safe(entry, temp, head) \
- for (entry = (head)->next, temp = (entry)->next; \
- entry != head; \
- entry = temp, temp = temp->next)
-
-#define list_del_init(entry) { \
- list_del(entry); \
- INIT_LIST_HEAD(entry); \
-}
-
-#define list_move_tail(entry, head, con_ptr) { \
- list_del(entry); \
- list_add_tail(entry, head, con_ptr); \
-}
-
-#define list_empty(head) ((head)->next == head)
-
-#endif /* _DRM_LINUX_LIST_H_ */
diff --git a/usr/src/uts/common/io/drm/drm_lock.c b/usr/src/uts/common/io/drm/drm_lock.c
index 6930a47..1e9f83f 100644
--- a/usr/src/uts/common/io/drm/drm_lock.c
+++ b/usr/src/uts/common/io/drm/drm_lock.c
@@ -1,13 +1,22 @@
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*/
/*
- * lock.c -- IOCTLs for locking -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
*/
+
/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -30,17 +39,130 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Lock ioctl.
*
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
*
+ * Add the current task to the lock wait queue, and attempt to take the lock.
*/
+/* LINTED */
+int drm_lock(DRM_IOCTL_ARGS)
+{
+ struct drm_lock *lock = data;
+ struct drm_master *master = file->master;
+ int ret = 0;
-#include "drmP.h"
+ ++file->lock_count;
+
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ DRM_CURRENTPID, lock->context);
+ return -EINVAL;
+ }
+
+ if (master->lock.hw_lock == NULL)
+ return -EINVAL;
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock->context, DRM_CURRENTPID,
+ master->lock.hw_lock->lock, lock->flags);
+
+ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
+ if (lock->context < 0)
+ return -EINVAL;
+
+ mutex_enter(&master->lock.lock_mutex);
+ master->lock.user_waiters++;
+ for (;;) {
+ if (drm_lock_take(&master->lock, lock->context)) {
+ master->lock.file_priv = file;
+ master->lock.lock_time = ddi_get_lbolt();
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+ ret = cv_wait_sig(&master->lock.lock_cv,
+ &master->lock.lock_mutex);
+ if (ret == 0) {
+ ret = -EINTR;
+ break;
+ }
+ }
+ master->lock.user_waiters--;
+ mutex_exit(&master->lock.lock_mutex);
+
+ DRM_DEBUG("%d %s\n", lock->context,
+ ret ? "interrupted" : "has lock");
+ if (ret)
+ return ret;
+
+ if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) {
+ if (dev->driver->dma_quiescent(dev)) {
+ DRM_DEBUG("%d waiting for DMA quiescent\n",
+ lock->context);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
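+
+/*
+ * Editorial note: cv_wait_sig() returns 0 when the wait is interrupted by
+ * a signal, which the loop above maps to -EINTR; a non-zero return means
+ * the condition variable was signalled and the lock attempt is retried.
+ */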
-int
-drm_lock_take(drm_lock_data_t *lock_data, unsigned int context)
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
+/* LINTED */
+int drm_unlock(DRM_IOCTL_ARGS)
+{
+ struct drm_lock *lock = data;
+ struct drm_master *master = file->master;
+
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ DRM_CURRENTPID, lock->context);
+ return -EINVAL;
+ }
+
+ atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ /* kernel_context_switch isn't used by any of the x86 drm
+ * modules but is required by the SPARC driver.
+ */
+ /* LINTED */
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
+ }
+
+ return 0;
+}
+
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+int drm_lock_take(struct drm_lock_data *lock_data,
+ unsigned int context)
{
unsigned int old, new;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -49,142 +171,148 @@ drm_lock_take(drm_lock_data_t *lock_data, unsigned int context)
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
- else
- new = context | _DRM_LOCK_HELD;
+ else {
+ new = context | _DRM_LOCK_HELD |
+ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+ _DRM_LOCK_CONT : 0);
+ }
} while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
- context);
+ context);
}
- return (0);
+ return 0;
}
}
- if (new == (context | _DRM_LOCK_HELD)) {
+
+ if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
- return (1);
+ return 1;
}
- return (0);
+ return 0;
}
-/*
+/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
-int
-drm_lock_transfer(drm_device_t *dev, drm_lock_data_t *lock_data,
- unsigned int context)
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+ unsigned int context)
{
unsigned int old, new;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
- dev->lock.filp = NULL;
+ lock_data->file_priv = NULL;
do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
-
- return (1);
+ return 1;
}
-int
-drm_lock_free(drm_device_t *dev, volatile unsigned int *lock,
- unsigned int context)
+/**
+ * Free lock.
+ *
+ * \param dev DRM device.
+ * \param lock lock.
+ * \param context context.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
unsigned int old, new;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ mutex_enter(&lock_data->lock_mutex);
+ if (lock_data->kernel_waiters != 0) {
+ (void) drm_lock_transfer(lock_data, 0);
+ lock_data->idle_has_lock = 1;
+ mutex_exit(&lock_data->lock_mutex);
+ return 1;
+ }
- mutex_enter(&(dev->lock.lock_mutex));
- dev->lock.filp = NULL;
do {
- old = *lock;
- new = 0;
+ old = *lock;
+ new = _DRM_LOCKING_CONTEXT(old);
} while (!atomic_cmpset_int(lock, old, new));
- if (_DRM_LOCK_IS_HELD(old) &&
- (_DRM_LOCKING_CONTEXT(old) != context)) {
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- mutex_exit(&(dev->lock.lock_mutex));
- return (1);
+ context, _DRM_LOCKING_CONTEXT(old));
+ mutex_exit(&lock_data->lock_mutex);
+ return 1;
}
- cv_broadcast(&(dev->lock.lock_cv));
- mutex_exit(&(dev->lock.lock_mutex));
- return (0);
+ cv_broadcast(&lock_data->lock_cv);
+ mutex_exit(&lock_data->lock_mutex);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_lock(DRM_IOCTL_ARGS)
+/**
+ * This function returns immediately: it takes the hw lock with the kernel
+ * context if the lock is free; otherwise the kernel context is queued at
+ * the highest priority to receive the lock when it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_
+ * it is held by a blocked process. (In the latter case an explicit wait for
+ * the hardware lock would cause a deadlock, which is why the "idlelock" was
+ * invented.)
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
{
- DRM_DEVICE;
- drm_lock_t lock;
int ret = 0;
- DRM_COPYFROM_WITH_RETURN(&lock, (void *)data, sizeof (lock));
-
- if (lock.context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- DRM_CURRENTPID, lock.context);
- return (EINVAL);
- }
+ mutex_enter(&lock_data->lock_mutex);
+ lock_data->kernel_waiters++;
+ if (!lock_data->idle_has_lock) {
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
- lock.flags);
- if (dev->driver->use_dma_queue && lock.context < 0)
- return (EINVAL);
+ ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- mutex_enter(&(dev->lock.lock_mutex));
- for (;;) {
- if (drm_lock_take(&dev->lock, lock.context)) {
- dev->lock.filp = fpriv;
- dev->lock.lock_time = ddi_get_lbolt();
- break; /* Got lock */
- }
- ret = cv_wait_sig(&(dev->lock.lock_cv),
- &(dev->lock.lock_mutex));
-
- if (ret == 0) {
- mutex_exit(&(dev->lock.lock_mutex));
- return (EINTR);
- }
+ if (ret == 1)
+ lock_data->idle_has_lock = 1;
}
- mutex_exit(&(dev->lock.lock_mutex));
- DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-
- if (dev->driver->dma_quiescent != NULL &&
- (lock.flags & _DRM_LOCK_QUIESCENT))
- dev->driver->dma_quiescent(dev);
-
- return (0);
+ mutex_exit(&lock_data->lock_mutex);
}
-/*ARGSUSED*/
-int
-drm_unlock(DRM_IOCTL_ARGS)
+void drm_idlelock_release(struct drm_lock_data *lock_data)
{
- DRM_DEVICE;
- drm_lock_t lock;
-
- DRM_COPYFROM_WITH_RETURN(&lock, (void *)data, sizeof (lock));
-
- DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
- lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
- lock.flags);
+ unsigned int old;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
- if (lock.context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- DRM_CURRENTPID, lock.context);
- return (EINVAL);
+ mutex_enter(&lock_data->lock_mutex);
+ if (--lock_data->kernel_waiters == 0) {
+ if (lock_data->idle_has_lock) {
+ do {
+ old = *lock;
+ } while (!atomic_cmpset_int(lock, old, DRM_KERNEL_CONTEXT));
+ cv_broadcast(&lock_data->lock_cv);
+ lock_data->idle_has_lock = 0;
+ }
}
- atomic_inc_32(&dev->counts[_DRM_STAT_UNLOCKS]);
+ mutex_exit(&lock_data->lock_mutex);
+}
- DRM_LOCK();
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock, lock.context)) {
- DRM_ERROR("drm_unlock\n");
- }
- DRM_UNLOCK();
- return (0);
+/* LINTED */
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_master *master = file_priv->master;
+ return (file_priv->lock_count && master->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+ master->lock.file_priv == file_priv);
}
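
A sketch of the idlelock pattern described above (editorial, not part of
the patch; my_driver_wait_idle() is a hypothetical helper): a driver takes
the idlelock around a GPU-idle wait so the kernel context cannot deadlock
against a blocked user-space lock holder.

    static int
    my_driver_quiesce(struct drm_device *dev, struct drm_file *file)
    {
        struct drm_master *master = file->master;
        int ret;

        drm_idlelock_take(&master->lock);
        ret = my_driver_wait_idle(dev);    /* hypothetical */
        drm_idlelock_release(&master->lock);

        return (ret);
    }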
diff --git a/usr/src/uts/common/io/drm/drm_memory.c b/usr/src/uts/common/io/drm/drm_memory.c
index cf2d5f6..2caa401 100644
--- a/usr/src/uts/common/io/drm/drm_memory.c
+++ b/usr/src/uts/common/io/drm/drm_memory.c
@@ -1,6 +1,5 @@
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -10,7 +9,7 @@
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2012, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,44 +38,40 @@
*/
#include "drmP.h"
+#ifdef __x86
+#include "drm_linux_list.h"
+#endif
/* Device memory access structure */
+
typedef struct drm_device_iomap {
- uint_t physical; /* physical address */
- uint_t size; /* size of mapping */
- uint_t drm_regnum; /* register number */
- caddr_t drm_base; /* kernel virtual address */
- ddi_acc_handle_t drm_handle; /* data access handle */
+ uint_t paddr; /* physical address */
+ uint_t size; /* size of mapping */
+ caddr_t kvaddr; /* kernel virtual address */
+ ddi_acc_handle_t acc_handle; /* data access handle */
} drm_device_iomap_t;
-void
-drm_mem_init(void)
-{
-}
-
-void
-drm_mem_uninit(void)
-{
-}
-
-/*ARGSUSED*/
void *
drm_alloc(size_t size, int area)
{
+ _NOTE(ARGUNUSED(area))
+
return (kmem_zalloc(1 * size, KM_NOSLEEP));
}
-/*ARGSUSED*/
void *
drm_calloc(size_t nmemb, size_t size, int area)
{
+ _NOTE(ARGUNUSED(area))
+
return (kmem_zalloc(size * nmemb, KM_NOSLEEP));
}
-/*ARGSUSED*/
void *
drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
{
+ _NOTE(ARGUNUSED(area))
+
void *pt;
pt = kmem_zalloc(1 * size, KM_NOSLEEP);
@@ -85,137 +80,192 @@ drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
return (NULL);
}
if (oldpt && oldsize) {
- bcopy(pt, oldpt, oldsize);
+ bcopy(oldpt, pt, min(oldsize, size));
kmem_free(oldpt, oldsize);
}
return (pt);
}
-/*ARGSUSED*/
void
drm_free(void *pt, size_t size, int area)
{
- kmem_free(pt, size);
+ _NOTE(ARGUNUSED(area))
+
+ if (pt)
+ kmem_free(pt, size);
}
-/*ARGSUSED*/
int
-drm_get_pci_index_reg(dev_info_t *devi, uint_t physical, uint_t size,
- off_t *off)
+drm_get_pci_index_reg(dev_info_t *dip, uint_t paddr, uint_t size, off_t *off)
{
- int length;
- pci_regspec_t *regs;
- int n_reg, i;
- int regnum;
- uint_t base, regsize;
+ _NOTE(ARGUNUSED(size))
+
+ pci_regspec_t *regs = NULL;
+ int len;
+ uint_t regbase, regsize;
+ int nregs, i;
+ int regnum;
regnum = -1;
- if (ddi_dev_nregs(devi, &n_reg) == DDI_FAILURE) {
- DRM_ERROR("drm_get_pci_index_reg:ddi_dev_nregs failed\n");
- n_reg = 0;
+ if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE) {
+ DRM_ERROR("ddi_dev_nregs() failed");
return (-1);
}
- if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
- "assigned-addresses", (caddr_t)&regs, &length) !=
- DDI_PROP_SUCCESS) {
- DRM_ERROR("drm_get_pci_index_reg: ddi_getlongprop failed!\n");
- goto error;
+ if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "assigned-addresses", (caddr_t)&regs, &len) != DDI_PROP_SUCCESS) {
+ DRM_ERROR("ddi_getlongprop() failed");
+ if (regs)
+ kmem_free(regs, (size_t)len);
+ return (-1);
}
- for (i = 0; i < n_reg; i ++) {
- base = (uint_t)regs[i].pci_phys_low;
+ for (i = 0; i < nregs; i ++) {
+ regbase = (uint_t)regs[i].pci_phys_low;
regsize = (uint_t)regs[i].pci_size_low;
- if ((uint_t)physical >= base &&
- (uint_t)physical < (base + regsize)) {
+ if ((uint_t)paddr >= regbase &&
+ (uint_t)paddr < (regbase + regsize)) {
regnum = i + 1;
- *off = (off_t)(physical - base);
+ *off = (off_t)(paddr - regbase);
break;
}
}
- kmem_free(regs, (size_t)length);
+ if (regs)
+ kmem_free(regs, (size_t)len);
return (regnum);
-error:
- kmem_free(regs, (size_t)length);
- return (-1);
}
/* data access attributes structure for register access */
static ddi_device_acc_attr_t dev_attr = {
DDI_DEVICE_ATTR_V0,
+#ifdef _BIG_ENDIAN
+ DDI_STRUCTURE_LE_ACC,
+#else
DDI_NEVERSWAP_ACC,
+#endif
DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
};
-int
-do_ioremap(dev_info_t *devi, drm_device_iomap_t *iomap)
+static int
+__ioremap(dev_info_t *dip, drm_device_iomap_t *iomap)
{
- int regnum;
off_t offset;
+ int regnum;
int ret;
- regnum = drm_get_pci_index_reg(devi, iomap->physical,
- iomap->size, &offset);
+ regnum = drm_get_pci_index_reg(
+ dip, iomap->paddr, iomap->size, &offset);
if (regnum < 0) {
- DRM_ERROR("do_ioremap: can not find regster entry,"
- " start=0x%x, size=0x%x", iomap->physical, iomap->size);
- return (ENXIO);
+ DRM_ERROR("can not find register entry: "
+ "paddr=0x%x, size=0x%x", iomap->paddr, iomap->size);
+ return -ENXIO;
}
- iomap->drm_regnum = regnum;
-
- ret = ddi_regs_map_setup(devi, iomap->drm_regnum,
- (caddr_t *)&(iomap->drm_base), (offset_t)offset,
- (offset_t)iomap->size, &dev_attr, &iomap->drm_handle);
- if (ret < 0) {
- DRM_ERROR("do_ioremap: failed to map regs: regno=%d,"
- " offset=0x%x", regnum, offset);
- iomap->drm_handle = NULL;
- return (EFAULT);
+ ret = ddi_regs_map_setup(dip, regnum,
+ (caddr_t *)&(iomap->kvaddr), (offset_t)offset,
+ (offset_t)iomap->size, &dev_attr, &iomap->acc_handle);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("failed to map regs: "
+ "regnum=%d, offset=0x%lx", regnum, offset);
+ iomap->acc_handle = NULL;
+ return -EFAULT;
}
- return (0);
+ return 0;
}
int
-drm_ioremap(drm_device_t *softstate, drm_local_map_t *map)
+drm_ioremap(struct drm_device *dev, struct drm_local_map *map)
{
- drm_device_iomap_t iomap;
+ struct drm_device_iomap iomap;
int ret;
- DRM_DEBUG("drm_ioremap called\n");
+ DRM_DEBUG("\n");
- bzero(&iomap, sizeof (drm_device_iomap_t));
- iomap.physical = map->offset;
+ iomap.paddr = map->offset;
iomap.size = map->size;
- ret = do_ioremap(softstate->dip, &iomap);
+ ret = __ioremap(dev->devinfo, &iomap);
if (ret) {
- DRM_ERROR("drm_ioremap: failed, physaddr=0x%x, size=0x%x",
+ DRM_ERROR("__ioremap failed: paddr=0x%lx, size=0x%lx",
map->offset, map->size);
return (ret);
}
- /* ddi_acc_handle_t */
- map->dev_handle = iomap.drm_handle;
- map->handle = (void *)iomap.drm_base;
- map->dev_addr = iomap.drm_base;
+ map->handle = (void *)iomap.kvaddr;
+ map->acc_handle = iomap.acc_handle;
DRM_DEBUG(
- "map->handle is %p map->dev_addr is %lx map->size %x",
- (void *)map->handle, (unsigned long)map->dev_addr, map->size);
+ "map->handle=%p, map->size=%lx",
+ (void *)map->handle, map->size);
return (0);
}
void
-drm_ioremapfree(drm_local_map_t *map)
+drm_ioremapfree(struct drm_local_map *map)
{
- if (map->dev_handle == NULL) {
- DRM_ERROR("drm_ioremapfree: handle is NULL");
- return;
+ if (map->acc_handle)
+ ddi_regs_map_free(&map->acc_handle);
+}
+
+#ifdef __x86
+struct drm_iomem {
+ void *addr;
+ size_t size;
+ struct list_head head;
+};
+
+struct list_head drm_iomem_list;
+
+void *
+drm_sun_ioremap(uint64_t paddr, size_t size, uint32_t mode)
+{
+ struct drm_iomem *iomem;
+ void *addr;
+
+ if (mode == DRM_MEM_CACHED)
+ mode = GFXP_MEMORY_CACHED;
+ else if (mode == DRM_MEM_UNCACHED)
+ mode = GFXP_MEMORY_UNCACHED;
+ else if (mode == DRM_MEM_WC)
+ mode = GFXP_MEMORY_WRITECOMBINED;
+ else
+ return (NULL);
+
+ addr = (void *)gfxp_alloc_kernel_space(size);
+ if (!addr)
+ return (NULL);
+ gfxp_load_kernel_space(paddr, size, mode, addr);
+ iomem = kmem_zalloc(sizeof(*iomem), KM_NOSLEEP);
+ if (!iomem) {
+ gfxp_unmap_kernel_space(addr, size);
+ return (NULL);
+ }
+ iomem->addr = addr;
+ iomem->size = size;
+
+ INIT_LIST_HEAD(&iomem->head);
+ list_add(&iomem->head, &drm_iomem_list, (caddr_t)iomem);
+
+ return (addr);
+}
+
+void
+drm_sun_iounmap(void *addr)
+{
+ struct drm_iomem *iomem;
+
+ list_for_each_entry(iomem, struct drm_iomem, &drm_iomem_list, head) {
+ if (iomem->addr == addr) {
+ gfxp_unmap_kernel_space(addr, iomem->size);
+ list_del(&iomem->head);
+ kmem_free(iomem, sizeof(*iomem));
+ break;
+ }
}
- ddi_regs_map_free(&map->dev_handle);
}
+#endif /* x86 */
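
A sketch of how a caller might use the x86 helpers above (editorial, not
part of the patch; the physical address and size are placeholders, and
drm_iomem_list is assumed to have been initialized during attach):

    static void
    example_map_regs(void)
    {
        void *regs;

        /* map 4 KB of device memory write-combined */
        regs = drm_sun_ioremap(0xd0000000ULL, 0x1000, DRM_MEM_WC);
        if (regs == NULL)
            return;

        /* ... access the mapping through regs ... */

        drm_sun_iounmap(regs);
    }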
diff --git a/usr/src/uts/common/io/drm/drm_mm.c b/usr/src/uts/common/io/drm/drm_mm.c
index d2d70c4..4fe27ff 100644
--- a/usr/src/uts/common/io/drm/drm_mm.c
+++ b/usr/src/uts/common/io/drm/drm_mm.c
@@ -1,18 +1,22 @@
/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files(the
+ * copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
- * The above copyright notice and this permission notice(including the
+ * The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
@@ -25,312 +29,791 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
- */
+ **************************************************************************/
/*
* Generic simple memory manager implementation. Intended to be used as a base
* class implementation for more advanced memory managers.
*
* Note that the algorithm used is quite simple and there might be substantial
- * performance gains if a smarter free list is implemented.
- * Currently it is just an
+ * performance gains if a smarter free list is implemented. Currently it is just an
* unordered stack of free regions. This could easily be improved if an RB-tree
* is used instead. At least if we expect heavy fragmentation.
*
* Aligned allocations can also see improvement.
*
* Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
+#include "drm_mm.h"
+#define MM_UNUSED_TARGET 4
-unsigned long
-drm_mm_tail_space(struct drm_mm *mm)
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_mm_node *child;
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return (0);
+ if (atomic)
+ child = kzalloc(sizeof(*child), GFP_ATOMIC);
+ else
+ child = kzalloc(sizeof(*child), GFP_KERNEL);
+
+ if (unlikely(child == NULL)) {
+ spin_lock(&mm->unused_lock);
+ if (list_empty(&mm->unused_nodes))
+ child = NULL;
+ else {
+ child =
+ list_entry(mm->unused_nodes.next,
+ struct drm_mm_node, node_list);
+ list_del(&child->node_list);
+ --mm->num_unused;
+ }
+ spin_unlock(&mm->unused_lock);
+ }
+ return child;
+}
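+
+/*
+ * Editorial note: when a GFP_ATOMIC allocation fails, drm_mm_kmalloc()
+ * falls back to the per-manager unused_nodes cache, so callers in atomic
+ * context should fill that cache with drm_mm_pre_get() beforehand.
+ */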
- return (entry->size);
+/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
+ * @mm: memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+
+ spin_lock(&mm->unused_lock);
+ while (mm->num_unused < MM_UNUSED_TARGET) {
+ spin_unlock(&mm->unused_lock);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ spin_lock(&mm->unused_lock);
+
+ if (unlikely(node == NULL)) {
+ int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+ spin_unlock(&mm->unused_lock);
+ return ret;
+ }
+ ++mm->num_unused;
+ list_add_tail(&node->node_list, &mm->unused_nodes, (caddr_t)node);
+ }
+ spin_unlock(&mm->unused_lock);
+ return 0;
}
-int
-drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color)
{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
+
+ BUG_ON(node->allocated);
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp)
+ adj_start += alignment - tmp;
+ }
+
+ if (adj_start == hole_start) {
+ hole_node->hole_follows = 0;
+ list_del(&hole_node->hole_stack);
+ }
+
+ node->start = adj_start;
+ node->size = size;
+ node->mm = mm;
+ node->color = color;
+ node->allocated = 1;
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return (ENOMEM);
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list, (caddr_t)node);
- if (entry->size <= size)
- return (ENOMEM);
+ BUG_ON(node->start + node->size > adj_end);
- entry->size -= size;
- return (0);
+ node->hole_follows = 0;
+ if (__drm_mm_hole_node_start(node) < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
+ node->hole_follows = 1;
+ }
}
+struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size,
+ bool atomic)
+{
+ struct drm_mm_node *hole, *node;
+ unsigned long end = start + size;
+ unsigned long hole_start;
+ unsigned long hole_end;
+
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ if (hole_start > start || hole_end < end)
+ continue;
+
+ node = drm_mm_kmalloc(mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ node->start = start;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole->node_list, (caddr_t)node);
+
+ if (start == hole_start) {
+ hole->hole_follows = 0;
+ list_del_init(&hole->hole_stack);
+ }
+
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
+ node->hole_follows = 1;
+ }
+
+ return node;
+ }
+
+ DRM_ERROR("no hole found for block 0x%lx + 0x%lx\n", start, size);
+ return NULL;
+}
-static int
-drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size)
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ int atomic)
{
- struct drm_mm_node *child;
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
- child = (struct drm_mm_node *)
- drm_alloc(sizeof (*child), DRM_MEM_MM);
- if (!child)
- return (ENOMEM);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
+ return node;
+}
+
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color)
+{
+ struct drm_mm_node *hole_node;
- list_add_tail(&child->ml_entry, &mm->ml_entry, (caddr_t)child);
- list_add_tail(&child->fl_entry, &mm->fl_entry, (caddr_t)child);
+ hole_node = drm_mm_search_free_generic(mm, size, alignment,
+ color, 0);
+ if (!hole_node)
+ return -ENOSPC;
- return (0);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
+ return 0;
}
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
-int
-drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
+ unsigned long start, unsigned long end)
{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
+
+ BUG_ON(!hole_node->hole_follows || node->allocated);
+
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp)
+ adj_start += alignment - tmp;
+ }
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free) {
- return (drm_mm_create_tail_node(mm,
- entry->start + entry->size, size));
+ if (adj_start == hole_start) {
+ hole_node->hole_follows = 0;
+ list_del(&hole_node->hole_stack);
+ }
+
+ node->start = adj_start;
+ node->size = size;
+ node->mm = mm;
+ node->color = color;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list, (caddr_t)node);
+
+ BUG_ON(node->start + node->size > adj_end);
+ BUG_ON(node->start + node->size > end);
+
+ node->hole_follows = 0;
+ if (__drm_mm_hole_node_start(node) < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
+ node->hole_follows = 1;
}
- entry->size += size;
- return (0);
}
-static struct drm_mm_node *
-drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size)
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
{
- struct drm_mm_node *child;
+ struct drm_mm_node *node;
- child = (struct drm_mm_node *)
- drm_alloc(sizeof (*child), DRM_MEM_MM);
- if (!child)
- return (NULL);
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
- INIT_LIST_HEAD(&child->fl_entry);
+ drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
+ start, end);
- child->free = 0;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
+ return node;
+}
- list_add_tail(&child->ml_entry, &parent->ml_entry, (caddr_t)child);
- INIT_LIST_HEAD(&child->fl_entry);
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for
+ * range-restricted allocations. The preallocated memory node must be cleared.
+ */
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment, unsigned long color,
+ unsigned long start, unsigned long end)
+{
+ struct drm_mm_node *hole_node;
+
+ hole_node = drm_mm_search_free_in_range_generic(mm,
+ size, alignment, color,
+ start, end, 0);
+ if (!hole_node)
+ return -ENOSPC;
+
+ drm_mm_insert_helper_range(hole_node, node,
+ size, alignment, color,
+ start, end);
+ return 0;
+}
- parent->size -= size;
- parent->start += size;
- return (child);
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Remove a memory node from the allocator.
*/
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ BUG_ON(node->scanned_block || node->scanned_prev_free
+ || node->scanned_next_free);
+
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+ if (node->hole_follows) {
+ BUG_ON(__drm_mm_hole_node_start(node) ==
+ __drm_mm_hole_node_end(node));
+ list_del(&node->hole_stack);
+ /* LINTED */
+ } else
+ BUG_ON(__drm_mm_hole_node_start(node) !=
+ __drm_mm_hole_node_end(node));
+
-void
-drm_mm_put_block(struct drm_mm_node *cur)
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack, (caddr_t)prev_node);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack, (caddr_t)prev_node);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
+}
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
{
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->ml_entry;
- struct list_head *root_head = &mm->ml_entry;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ struct drm_mm *mm = node->mm;
- int merged = 0;
+ drm_mm_remove_node(node);
- if (cur_head->prev != root_head) {
- prev_node = list_entry(cur_head->prev,
- struct drm_mm_node, ml_entry);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node = list_entry(cur_head->next,
- struct drm_mm_node, ml_entry);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->ml_entry);
- list_del(&next_node->fl_entry);
- drm_free(next_node,
- sizeof (*next_node), DRM_MEM_MM);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->fl_entry, &mm->fl_entry, (caddr_t)cur);
- } else {
- list_del(&cur->ml_entry);
- drm_free(cur, sizeof (*cur), DRM_MEM_MM);
+ spin_lock(&mm->unused_lock);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&node->node_list, &mm->unused_nodes, (caddr_t)node);
+ ++mm->num_unused;
+ } else
+ kfree(node, sizeof(struct drm_mm_node));
+ spin_unlock(&mm->unused_lock);
+}
+
+static int check_free_hole(unsigned long start, unsigned long end,
+ unsigned long size, unsigned alignment)
+{
+ if (end - start < size)
+ return 0;
+
+ if (alignment) {
+ unsigned tmp = start % alignment;
+ if (tmp)
+ start += alignment - tmp;
}
+
+ return end >= start + size;
}
-struct drm_mm_node *
-drm_mm_get_block(struct drm_mm_node *parent,
- unsigned long size,
- unsigned alignment)
+struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match)
{
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
+ unsigned long best_size;
- struct drm_mm_node *align_splitoff = NULL;
- struct drm_mm_node *child;
- unsigned tmp = 0;
+ BUG_ON(mm->scanned_blocks);
- if (alignment)
- tmp = parent->start % alignment;
+ best = NULL;
+ best_size = ~0UL;
- if (tmp) {
- align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
- if (!align_splitoff)
- return (NULL);
- }
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
- if (parent->size == size) {
- list_del_init(&parent->fl_entry);
- parent->free = 0;
- return (parent);
- } else {
- child = drm_mm_split_at_start(parent, size);
- }
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
+ continue;
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
- return (child);
+ return best;
}
-struct drm_mm_node *
-drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- int best_match)
+struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match)
{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
- unsigned wasted;
+
+ BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
- if (entry->size < size)
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (alignment) {
- register unsigned tmp = entry->start % alignment;
- if (tmp)
- wasted += alignment - tmp;
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
}
+ }
+ return best;
+}
- if (entry->size >= size + wasted) {
- if (!best_match)
- return (entry);
- if (size < best_size) {
- best = entry;
- best_size = entry->size;
- }
- }
+/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->hole_stack, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+ new->color = old->color;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color)
+{
+ mm->scan_color = color;
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_end = 0;
+ mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
+}
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_color = color;
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_end = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
+}
+
+/**
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+ * Returns non-zero, if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
+ unsigned long adj_start, adj_end;
+
+ mm->scanned_blocks++;
+
+ BUG_ON(node->scanned_block);
+ node->scanned_block = 1;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+
+ adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+ adj_end = hole_end = drm_mm_hole_node_end(prev_node);
+
+ if (mm->scan_check_range) {
+ if (adj_start < mm->scan_start)
+ adj_start = mm->scan_start;
+ if (adj_end > mm->scan_end)
+ adj_end = mm->scan_end;
+ }
+
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color,
+ &adj_start, &adj_end);
+
+ if (check_free_hole(adj_start, adj_end,
+ mm->scan_size, mm->scan_alignment)) {
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_end = hole_end;
+ return 1;
}
- return (best);
+ return 0;
}
-int
-drm_mm_clean(struct drm_mm *mm)
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed in the exact same order from the scan list as they
+ * have been added, otherwise the internal state of the memory manager will be
+ * corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then
+ * return the just freed block (because it's at the top of the free_stack list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
- struct list_head *head = &mm->ml_entry;
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
- return (head->next->next == head);
+ mm->scanned_blocks--;
+
+ BUG_ON(!node->scanned_block);
+ node->scanned_block = 0;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ list_add(&node->node_list, &prev_node->node_list, (caddr_t)node);
+
+ return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+ node->start < mm->scan_hit_end);
}
-int
-drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
+
+int drm_mm_clean(struct drm_mm *mm)
{
- INIT_LIST_HEAD(&mm->ml_entry);
- INIT_LIST_HEAD(&mm->fl_entry);
+ struct list_head *head = &mm->head_node.node_list;
- return (drm_mm_create_tail_node(mm, start, size));
+ return (head->next->next == head);
}
-void
-drm_mm_takedown(struct drm_mm *mm)
+void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
- struct list_head *bnode = mm->fl_entry.next;
- struct drm_mm_node *entry;
+ INIT_LIST_HEAD(&mm->hole_stack);
+ INIT_LIST_HEAD(&mm->unused_nodes);
+ mm->num_unused = 0;
+ mm->scanned_blocks = 0;
+ spin_lock_init(&mm->unused_lock);
+
+ /* Clever trick to avoid a special case in the free hole tracking. */
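+ /*
+ * Editorial note: with start = start + size and a size that wraps
+ * negative, __drm_mm_hole_node_start(&mm->head_node) evaluates back
+ * to "start", so the embedded head node stands in for the single
+ * initial hole covering [start, start + size).
+ */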
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ mm->head_node.node_list.contain_ptr = (caddr_t)&mm->head_node;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack, (caddr_t)&mm->head_node);
+ mm->color_adjust = NULL;
+}
+
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+void drm_mm_takedown(struct drm_mm *mm)
+{
+ struct drm_mm_node *entry, *next;
- if (entry->ml_entry.next != &mm->ml_entry ||
- entry->fl_entry.next != &mm->fl_entry) {
+ if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
+ spin_lock(&mm->unused_lock);
+ list_for_each_entry_safe(entry, next, struct drm_mm_node, &mm->unused_nodes, node_list) {
+ list_del(&entry->node_list);
+ kfree(entry, sizeof(struct drm_mm_node));
+ --mm->num_unused;
+ }
+ spin_unlock(&mm->unused_lock);
- drm_free(entry, sizeof (*entry), DRM_MEM_MM);
+ BUG_ON(mm->num_unused != 0);
}
-void
-drm_mm_clean_ml(const struct drm_mm *mm)
+static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
+ const char *prefix)
{
- const struct list_head *mlstack = &mm->ml_entry;
- struct list_head *list, *temp;
- struct drm_mm_node *entry;
+ unsigned long hole_start, hole_end, hole_size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ DRM_DEBUG("%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ return hole_size;
+ }
- if (mlstack->next == NULL)
- return;
+ return 0;
+}
- list_for_each_safe(list, temp, mlstack) {
- entry = list_entry(list, struct drm_mm_node, ml_entry);
- DRM_DEBUG("ml_entry 0x%x, size 0x%x, start 0x%x",
- entry, entry->size, entry->start);
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+ struct drm_mm_node *entry;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+
+ total_free += drm_mm_debug_hole(&mm->head_node, prefix);
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
- drm_free(entry, sizeof (*entry), DRM_MEM_MM);
+ drm_mm_for_each_node(entry, struct drm_mm_node, mm) {
+ DRM_DEBUG("%s 0x%08lx-0x%08lx: %8lu: used\n",
+ prefix, entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+ total_free += drm_mm_debug_hole(entry, prefix);
}
+ total = total_free + total_used;
+
+ DRM_DEBUG("%s total: %lu, used %lu free %lu\n", prefix, total,
+ total_used, total_free);
+}
+
+bool drm_mm_initialized(struct drm_mm *mm)
+{
+ return (mm->hole_stack.next != NULL);
+}
+
+unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ return hole_node->start + hole_node->size;
+}
+
+unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ BUG_ON(!hole_node->hole_follows);
+ return __drm_mm_hole_node_start(hole_node);
+}
+
+unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ struct drm_mm_node *node;
+ node = list_entry(hole_node->node_list.next,
+ struct drm_mm_node, node_list);
+ return node->start;
+}
+
+unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ return __drm_mm_hole_node_end(hole_node);
+}
+
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+ return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
+}
+struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+ return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
+}
+struct drm_mm_node *drm_mm_get_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment, 0,
+ start, end, 0);
+}
+struct drm_mm_node *drm_mm_get_block_atomic_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment, 0,
+ start, end, 1);
+}
+
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ bool best_match)
+{
+ return drm_mm_search_free_generic(mm, size, alignment, 0, best_match);
+}
+struct drm_mm_node *drm_mm_search_free_in_range(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ bool best_match)
+{
+ return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
+ start, end, best_match);
}
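
A sketch of the embedded-node flow the manager above supports (editorial,
not part of the patch; the managed range and allocation size are
placeholders):

    static int
    example_mm_usage(void)
    {
        struct drm_mm mm;
        struct drm_mm_node node;
        int ret;

        drm_mm_init(&mm, 0, 1024 * 1024);    /* manage a 1 MB range */

        /* the preallocated node must be cleared first */
        bzero(&node, sizeof (node));
        ret = drm_mm_insert_node(&mm, &node, 4096, 0);
        if (ret == 0)
            drm_mm_remove_node(&node);

        drm_mm_takedown(&mm);
        return (ret);
    }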
diff --git a/usr/src/uts/common/io/drm/drm_modes.c b/usr/src/uts/common/io/drm/drm_modes.c
new file mode 100644
index 0000000..10f843e
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_modes.c
@@ -0,0 +1,1140 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ *
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_linux_list.h"
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+ "0x%x 0x%x\n",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+
+/**
+ * drm_cvt_mode - create a modeline based on the CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether the simplified (reduced-blanking) GTF calculation is used
+ * @interlaced: whether interlace is supported
+ * @margins: whether margins (borders) are requested
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the CVT algorithm.
+ *
+ * This function is called to generate the modeline based on CVT algorithm
+ * according to the hdisplay, vdisplay, vrefresh.
+ * It is based on the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge April 9, 2003 available at
+ * http://www.vesa.org/public/CVT/CVTd6r1.xls
+ *
+ * It is adapted from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c,
+ * translated to use integer calculation.
+ */
+#define HV_FACTOR 1000
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+ int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins)
+{
+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define CVT_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define CVT_H_GRANULARITY 8
+ /* 3) Minimum vertical porch (lines) - default 3 */
+#define CVT_MIN_V_PORCH 3
+ /* 4) Minimum number of vertical back porch lines - default 6 */
+#define CVT_MIN_V_BPORCH 6
+ /* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP 250
+ struct drm_display_mode *drm_mode;
+ unsigned int vfieldrate, hperiod;
+ int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+ int interlace;
+
+ /* allocate the drm_display_mode structure. If allocation fails,
+ * return directly
+ */
+ drm_mode = drm_mode_create(dev);
+ if (!drm_mode)
+ return NULL;
+
+ /* the CVT default refresh rate is 60Hz */
+ if (!vrefresh)
+ vrefresh = 60;
+
+ /* the required field refresh rate */
+ if (interlaced)
+ vfieldrate = vrefresh * 2;
+ else
+ vfieldrate = vrefresh;
+
+ /* horizontal pixels */
+ hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+ /* determine the left&right borders */
+ hmargin = 0;
+ if (margins) {
+ hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+ hmargin -= hmargin % CVT_H_GRANULARITY;
+ }
+ /* find the total active pixels */
+ drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+ /* find the number of lines per field */
+ if (interlaced)
+ vdisplay_rnd = vdisplay / 2;
+ else
+ vdisplay_rnd = vdisplay;
+
+ /* find the top & bottom borders */
+ vmargin = 0;
+ if (margins)
+ vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+ drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+ /* Interlaced */
+ if (interlaced)
+ interlace = 1;
+ else
+ interlace = 0;
+
+ /* Determine VSync Width from aspect ratio */
+ if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+ vsync = 4;
+ else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+ vsync = 5;
+ else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+ vsync = 6;
+ else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+ vsync = 7;
+ else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+ vsync = 7;
+ else /* custom */
+ vsync = 10;
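+
+ /*
+ * Worked example (editorial): for a 1024x768 request, 768 % 3 == 0
+ * and 768 * 4 / 3 == 1024, so the 4:3 branch above selects
+ * vsync = 4 lines.
+ */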
+
+ if (!reduced) {
+ /* simplify the GTF calculation */
+ /* 4) Minimum time of vertical sync + back porch interval (µs)
+ * default 550.0
+ */
+ int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP 550
+ /* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE 8
+ unsigned int hblank_percentage;
+ /* LINTED */
+ int vsyncandback_porch, vback_porch, hblank;
+
+ /* estimate the horizontal period */
+ tmp1 = HV_FACTOR * 1000000 -
+ CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+ tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+ interlace;
+ hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+ tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+ /* 9. Find number of lines in sync + backporch */
+ if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+ vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+ else
+ vsyncandback_porch = tmp1;
+ /* 10. Find number of lines in back porch */
+ vback_porch = vsyncandback_porch - vsync;
+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+ vsyncandback_porch + CVT_MIN_V_PORCH;
+ /* 5) Definition of Horizontal blanking time limitation */
+ /* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR 600
+ /* Offset (%) - default 40 */
+#define CVT_C_FACTOR 40
+ /* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR 128
+ /* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR 20
+#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+ CVT_J_FACTOR)
+ /* 12. Find ideal blanking duty cycle from formula */
+ hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+ hperiod / 1000;
+ /* 13. Blanking time */
+ if (hblank_percentage < 20 * HV_FACTOR)
+ hblank_percentage = 20 * HV_FACTOR;
+ hblank = drm_mode->hdisplay * hblank_percentage /
+ (100 * HV_FACTOR - hblank_percentage);
+ hblank -= hblank % (2 * CVT_H_GRANULARITY);
+ /* 14. Find the total pixels per line */
+ drm_mode->htotal = drm_mode->hdisplay + hblank;
+ drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+ drm_mode->hsync_start = drm_mode->hsync_end -
+ (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+ drm_mode->hsync_start += CVT_H_GRANULARITY -
+ drm_mode->hsync_start % CVT_H_GRANULARITY;
+ /* fill the Vsync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ } else {
+ /* Reduced blanking */
+ /* Minimum vertical blanking interval time (µs)- default 460 */
+#define CVT_RB_MIN_VBLANK 460
+ /* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC 32
+ /* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK 160
+ /* Fixed number of lines for vertical front porch - default 3*/
+#define CVT_RB_VFPORCH 3
+ int vbilines;
+ int tmp1, tmp2;
+ /* 8. Estimate Horizontal period. */
+ tmp1 = HV_FACTOR * 1000000 -
+ CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+ tmp2 = vdisplay_rnd + 2 * vmargin;
+ hperiod = tmp1 / (tmp2 * vfieldrate);
+ /* 9. Find number of lines in vertical blanking */
+ vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+ /* 10. Check if vertical blanking is sufficient */
+ if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+ vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+ /* 11. Find total number of lines in vertical field */
+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+ /* 12. Find total number of pixels in a line */
+ drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+ /* Fill in HSync values */
+ drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+ /* Fill in VSync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ }
+ /* 15/13. Find pixel clock frequency (kHz for xf86) */
+ drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+ drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+ /* 18/16. Find actual vertical frame frequency */
+ /* ignore - just set the mode flag for interlaced */
+ if (interlaced) {
+ drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+ /* Fill the mode line name */
+ drm_mode_set_name(drm_mode);
+ if (reduced)
+ drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ else
+ drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NHSYNC);
+
+ return drm_mode;
+}
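+
+/*
+ * Usage sketch (editorial, not part of the original source):
+ *
+ * mode = drm_cvt_mode(dev, 1280, 800, 60, false, false, false);
+ * if (mode != NULL)
+ * drm_mode_debug_printmodeline(mode);
+ */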
+
+/**
+ * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether interlace is supported
+ * @margins: whether margins are supported
+ * @GTF_[MCKJ]: extended GTF formula parameters
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the GTF algorithm.
+ *
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
+ */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{
+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define GTF_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define GTF_CELL_GRAN 8
+ /* 3) Minimum vertical porch (lines) - default 3 */
+#define GTF_MIN_V_PORCH 1
+ /* width of vsync in lines */
+#define V_SYNC_RQD 3
+ /* width of hsync as % of total line */
+#define H_SYNC_PERCENT 8
+ /* min time of vsync + back porch (microsec) */
+#define MIN_VSYNC_PLUS_BP 550
+ /* C' and M' are part of the Blanking Duty Cycle computation */
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+ struct drm_display_mode *drm_mode;
+ unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
+ int top_margin, bottom_margin;
+ int interlace;
+ unsigned int hfreq_est;
+ /* LINTED */
+ int vsync_plus_bp, vback_porch;
+ /* LINTED */
+ unsigned int vtotal_lines, vfieldrate_est, hperiod;
+ /* LINTED */
+ unsigned int vfield_rate, vframe_rate;
+ int left_margin, right_margin;
+ unsigned int total_active_pixels, ideal_duty_cycle;
+ unsigned int hblank, total_pixels, pixel_freq;
+ int hsync, hfront_porch, vodd_front_porch_lines;
+ unsigned int tmp1, tmp2;
+
+ drm_mode = drm_mode_create(dev);
+ if (!drm_mode)
+ return NULL;
+
+ /* 1. In order to give correct results, the number of horizontal
+ * pixels requested is first processed to ensure that it is divisible
+ * by the character size, by rounding it to the nearest character
+ * cell boundary:
+ */
+ hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+ hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
+
+ /* 2. If interlace is requested, the number of vertical lines assumed
+ * by the calculation must be halved, as the computation calculates
+ * the number of vertical lines per field.
+ */
+ if (interlaced)
+ vdisplay_rnd = vdisplay / 2;
+ else
+ vdisplay_rnd = vdisplay;
+
+ /* 3. Find the frame rate required: */
+ if (interlaced)
+ vfieldrate_rqd = vrefresh * 2;
+ else
+ vfieldrate_rqd = vrefresh;
+
+ /* 4. Find number of lines in Top margin: */
+ top_margin = 0;
+ if (margins)
+ top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+ 1000;
+ /* 5. Find number of lines in bottom margin: */
+ bottom_margin = top_margin;
+
+ /* 6. If interlace is required, then set variable interlace: */
+ if (interlaced)
+ interlace = 1;
+ else
+ interlace = 0;
+
+ /* 7. Estimate the Horizontal frequency */
+ {
+ tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
+ tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
+ 2 + interlace;
+ hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
+ }
+
+ /* 8. Find the number of lines in V sync + back porch */
+ /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
+ vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
+ vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
+ /* 9. Find the number of lines in V back porch alone: */
+ vback_porch = vsync_plus_bp - V_SYNC_RQD;
+ /* 10. Find the total number of lines in Vertical field period: */
+ vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
+ vsync_plus_bp + GTF_MIN_V_PORCH;
+ /* 11. Estimate the Vertical field frequency: */
+ vfieldrate_est = hfreq_est / vtotal_lines;
+ /* 12. Find the actual horizontal period: */
+ hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
+
+ /* 13. Find the actual Vertical field frequency: */
+ vfield_rate = hfreq_est / vtotal_lines;
+ /* 14. Find the Vertical frame frequency: */
+ if (interlaced)
+ vframe_rate = vfield_rate / 2;
+ else
+ vframe_rate = vfield_rate;
+ /* 15. Find number of pixels in left margin: */
+ if (margins)
+ left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+ 1000;
+ else
+ left_margin = 0;
+
+ /* 16. Find number of pixels in right margin: */
+ right_margin = left_margin;
+ /* 17. Find total number of active pixels in image and left and right margins: */
+ total_active_pixels = hdisplay_rnd + left_margin + right_margin;
+ /* 18. Find the ideal blanking duty cycle from the blanking duty cycle equation: */
+ ideal_duty_cycle = GTF_C_PRIME * 1000 -
+ (GTF_M_PRIME * 1000000 / hfreq_est);
+ /* 19. Find the number of pixels in the blanking time to the nearest
+ * double character cell: */
+ hblank = total_active_pixels * ideal_duty_cycle /
+ (100000 - ideal_duty_cycle);
+ hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
+ hblank = hblank * 2 * GTF_CELL_GRAN;
+ /* 20. Find total number of pixels: */
+ total_pixels = total_active_pixels + hblank;
+ /* 21. Find pixel clock frequency: */
+ pixel_freq = total_pixels * hfreq_est / 1000;
+ /* Stage 1 computations are now complete; I should really pass
+ * the results to another function and do the Stage 2 computations,
+ * but I only need a few more values so I'll just append the
+ * computations here for now */
+ /* 17. Find the number of pixels in the horizontal sync period: */
+ hsync = H_SYNC_PERCENT * total_pixels / 100;
+ hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+ hsync = hsync * GTF_CELL_GRAN;
+ /* 18. Find the number of pixels in horizontal front porch period */
+ hfront_porch = hblank / 2 - hsync;
+ /* 36. Find the number of lines in the odd front porch period: */
+ vodd_front_porch_lines = GTF_MIN_V_PORCH;
+
+ /* finally, pack the results in the mode struct */
+ drm_mode->hdisplay = hdisplay_rnd;
+ drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
+ drm_mode->hsync_end = drm_mode->hsync_start + hsync;
+ drm_mode->htotal = total_pixels;
+ drm_mode->vdisplay = vdisplay_rnd;
+ drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
+ drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
+ drm_mode->vtotal = vtotal_lines;
+
+ drm_mode->clock = pixel_freq;
+
+ if (interlaced) {
+ drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
+ return drm_mode;
+}
+
+/**
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @lace: whether to compute an interlaced mode
+ * @margins: whether to add margins (borders)
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return the modeline based on the GTF algorithm.
+ *
+ * This function creates a modeline using the Generalized Timing Formula,
+ * which is derived from:
+ *	GTF Spreadsheet by Andy Morrish (1/5/97)
+ *	available at http://www.vesa.org
+ *
+ * It is adapted from xserver/hw/xfree86/modes/xf86gtf.c, translated to
+ * integer arithmetic; see also fb_get_mode() in drivers/video/fbmon.c.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
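+
+/*
+ * Usage sketch (hypothetical caller): request a 1024x768 mode at 60Hz,
+ * non-interlaced and without margins; drm_gtf_mode() supplies the
+ * standard GTF parameters listed above.
+ *
+ *	struct drm_display_mode *mode;
+ *
+ *	mode = drm_gtf_mode(dev, 1024, 768, 60, false, 0);
+ *	if (mode != NULL) {
+ *		drm_mode_debug_printmodeline(mode);
+ *		drm_mode_destroy(dev, mode);
+ *	}
+ */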
+
+#ifdef CONFIG_VIDEOMODE_HELPERS
+int drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode)
+{
+ dmode->hdisplay = vm->hactive;
+ dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
+ dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
+ dmode->htotal = dmode->hsync_end + vm->hback_porch;
+
+ dmode->vdisplay = vm->vactive;
+ dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
+ dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
+ dmode->vtotal = dmode->vsync_end + vm->vback_porch;
+
+ dmode->clock = vm->pixelclock / 1000;
+
+ dmode->flags = 0;
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ dmode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
+ dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
+ dmode->flags |= DRM_MODE_FLAG_DBLCLK;
+ drm_mode_set_name(dmode);
+
+ return 0;
+}
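+
+/*
+ * Worked example: hactive = 1024, hfront_porch = 24, hsync_len = 136 and
+ * hback_porch = 160 yield hsync_start = 1048, hsync_end = 1184 and
+ * htotal = 1344; the vertical timings chain together the same way.
+ */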
+
+#ifdef CONFIG_OF
+/**
+ * of_get_drm_display_mode - get a drm_display_mode from devicetree
+ * @np: device_node with the timing specification
+ * @dmode: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * This function is expensive and should only be used, if only one mode is to be
+ * read from DT. To get multiple modes start with of_get_display_timings and
+ * work with that instead.
+ */
+int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, int index)
+{
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ drm_display_mode_from_videomode(&vm, dmode);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ drm_mode_debug_printmodeline(dmode);
+
+ return 0;
+}
+#endif /* CONFIG_OF */
+#endif /* CONFIG_VIDEOMODE_HELPERS */
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to the standard "<hdisplay>x<vdisplay>" format,
+ * with a trailing "i" for interlaced modes (e.g. "1024x768" or "1920x1080i").
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ (void) snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
+}
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, head) {
+ list_move_tail(entry, new, (caddr_t)entry);
+ }
+}
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(const struct drm_display_mode *mode)
+{
+ return mode->hdisplay;
+}
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(const struct drm_display_mode *mode)
+{
+ return mode->vdisplay;
+}
+
+/**
+ * drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest integer.
+ */
+int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+ unsigned int calc_val;
+
+ if (mode->hsync)
+ return mode->hsync;
+
+ if (mode->htotal <= 0)
+ return 0;
+
+ calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+ calc_val += 500; /* round to 1000Hz */
+ calc_val /= 1000; /* truncate to kHz */
+
+ return calc_val;
+}
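+
+/*
+ * Worked example: a 1024x768 mode with clock = 65000 kHz and
+ * htotal = 1344 gives (65000 * 1000) / 1344 = 48363 Hz, which the
+ * rounding steps above return as 48 kHz.
+ */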
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+ *
+ * FIXME: why is this needed? shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate, rounded to the nearest integer: an actual rate
+ * of 70.288 returns 70Hz, and 59.6 returns 60Hz.
+ */
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
+{
+ int refresh = 0;
+ unsigned int calc_val;
+
+ if (mode->vrefresh > 0)
+ refresh = mode->vrefresh;
+ else if (mode->htotal > 0 && mode->vtotal > 0) {
+ int vtotal;
+ vtotal = mode->vtotal;
+ /* work out vrefresh the value will be x1000 */
+ calc_val = (mode->clock * 1000);
+ calc_val /= mode->htotal;
+ refresh = (calc_val + vtotal / 2) / vtotal;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ refresh *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ refresh /= 2;
+ if (mode->vscan > 1)
+ refresh /= mode->vscan;
+ }
+ return refresh;
+}
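+
+/*
+ * Worked example: a 1024x768 mode with clock = 65000 kHz, htotal = 1344
+ * and vtotal = 806 gives calc_val = 48363 and
+ * (48363 + 403) / 806 = 60 Hz.
+ */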
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: CRTC_INTERLACE_HALVE_V to halve the vertical timings
+ * of interlaced modes
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+ return;
+
+ p->crtc_hdisplay = p->hdisplay;
+ p->crtc_hsync_start = p->hsync_start;
+ p->crtc_hsync_end = p->hsync_end;
+ p->crtc_htotal = p->htotal;
+ p->crtc_hskew = p->hskew;
+ p->crtc_vdisplay = p->vdisplay;
+ p->crtc_vsync_start = p->vsync_start;
+ p->crtc_vsync_end = p->vsync_end;
+ p->crtc_vtotal = p->vtotal;
+
+ if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+ p->crtc_vdisplay /= 2;
+ p->crtc_vsync_start /= 2;
+ p->crtc_vsync_end /= 2;
+ p->crtc_vtotal /= 2;
+ }
+ }
+
+ if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+ p->crtc_vdisplay *= 2;
+ p->crtc_vsync_start *= 2;
+ p->crtc_vsync_end *= 2;
+ p->crtc_vtotal *= 2;
+ }
+
+ if (p->vscan > 1) {
+ p->crtc_vdisplay *= p->vscan;
+ p->crtc_vsync_start *= p->vscan;
+ p->crtc_vsync_end *= p->vscan;
+ p->crtc_vtotal *= p->vscan;
+ }
+
+ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+}
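+
+/*
+ * Worked example: a 1920x1080 interlaced mode adjusted with
+ * CRTC_INTERLACE_HALVE_V gets crtc_vdisplay = 540, with the other
+ * crtc_v* timings halved the same way, since the CRTC scans out one
+ * field (half the lines) at a time.
+ */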
+
+/**
+ * drm_mode_copy - copy the mode
+ * @dst: mode to overwrite
+ * @src: mode to copy
+ *
+ * LOCKING:
+ * None.
+ *
+ * Copy an existing mode into another mode, preserving the object id
+ * of the destination mode.
+ */
+void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
+{
+ int id = dst->base.id;
+ struct list_head head = dst->head;
+
+ *dst = *src;
+ dst->base.id = id;
+ dst->head = head;
+}
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @m: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it. Used to create new instances of established modes.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = drm_mode_create(dev);
+ if (!nmode)
+ return NULL;
+
+ drm_mode_copy(nmode, mode);
+
+ return nmode;
+}
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
+ /* do the clock check by converting to picoseconds, so that
+ * fb modes get matched the same way */
+ if (mode1->clock && mode2->clock) {
+ if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+ return false;
+ } else if (mode1->clock != mode2->clock)
+ return false;
+
+ return drm_mode_equal_no_clocks(mode1, mode2);
+}
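+
+/*
+ * Comparing KHZ2PICOS(clock) rather than the raw kHz values lets clocks
+ * that differ by less than one picosecond of period still match; e.g.
+ * 65000 and 65001 kHz both map to a 15384 ps period (assuming the usual
+ * KHZ2PICOS(a) = 1000000000UL / (a) definition).
+ */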
+
+/**
+ * drm_mode_equal_no_clocks - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent, but
+ * don't check the pixel clocks.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
+ if (mode1->hdisplay == mode2->hdisplay &&
+ mode1->hsync_start == mode2->hsync_start &&
+ mode1->hsync_end == mode2->hsync_end &&
+ mode1->htotal == mode2->htotal &&
+ mode1->hskew == mode2->hskew &&
+ mode1->vdisplay == mode2->vdisplay &&
+ mode1->vsync_start == mode2->vsync_start &&
+ mode1->vsync_end == mode2->vsync_end &&
+ mode1->vtotal == mode2->vtotal &&
+ mode1->vscan == mode2->vscan &&
+ mode1->flags == mode2->flags)
+ return true;
+
+ return false;
+}
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits. Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+/* LINTED */
+void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, struct drm_display_mode, mode_list, head) {
+ if (maxPitch > 0 && mode->hdisplay > maxPitch)
+ mode->status = MODE_BAD_WIDTH;
+
+ if (maxX > 0 && mode->hdisplay > maxX)
+ mode->status = MODE_VIRTUAL_X;
+
+ if (maxY > 0 && mode->vdisplay > maxY)
+ mode->status = MODE_VIRTUAL_Y;
+ }
+}
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question. This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+/* LINTED */
+void drm_mode_validate_clocks(struct drm_device *dev,
+ struct list_head *mode_list,
+ int *min, int *max, int n_ranges)
+{
+ struct drm_display_mode *mode;
+ int i;
+
+ list_for_each_entry(mode, struct drm_display_mode, mode_list, head) {
+ bool good = false;
+ for (i = 0; i < n_ranges; i++) {
+ if (mode->clock >= min[i] && mode->clock <= max[i]) {
+ good = true;
+ break;
+ }
+ }
+ if (!good)
+ mode->status = MODE_CLOCK_RANGE;
+ }
+}
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list. If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose)
+{
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, struct drm_display_mode, mode_list, head) {
+ if (mode->status != MODE_OK) {
+ list_del(&mode->head);
+ if (verbose) {
+ drm_mode_debug_printmodeline(mode);
+ DRM_DEBUG_KMS("Not using %s mode %d\n",
+ mode->name, mode->status);
+ }
+ drm_mode_destroy(dev, mode);
+ }
+ }
+}
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+{
+ struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+ struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+ int diff;
+
+ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+ if (diff)
+ return diff;
+ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+ if (diff)
+ return diff;
+
+ diff = b->vrefresh - a->vrefresh;
+ if (diff)
+ return diff;
+
+ diff = b->clock - a->clock;
+ return diff;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+ struct list_head *cur, *end;
+ struct list_head *list, *temp;
+ int ordered = 1;
+
+ if (list_empty(mode_list))
+ return;
+
+ /* Pre-check the mode order:
+ * in most cases the list is already sorted.
+ */
+ for (list = mode_list->next, temp = list->next;
+ temp != mode_list;
+ list = temp, temp = temp->next) {
+ if (drm_mode_compare(list, temp) > 0) {
+ ordered = 0;
+ break;
+ }
+ }
+
+ if (ordered)
+ return;
+
+ end = mode_list->next;
+ cur = end->next;
+ while (cur != mode_list) {
+ list_for_each_safe(list, temp, mode_list) {
+ if (drm_mode_compare(list, cur) > 0) {
+ /* insert before this entry */
+ struct list_head *p_node = list->prev;
+ list_del(cur);
+ p_node->next = cur;
+ list->prev = cur;
+ cur->next = list;
+ cur->prev = p_node;
+
+ cur = end->next;
+ break;
+ }
+ if (list == end) {
+ end = cur;
+ cur = end->next;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares the probed mode against the current
+ * list and only adds different modes. Modes left unverified after this
+ * point will be removed later by drm_mode_prune_invalid().
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ struct drm_display_mode *pmode, *pt;
+ int found_it;
+
+ list_for_each_entry_safe(pmode, pt, struct drm_display_mode,
+ &connector->probed_modes,
+ head) {
+ found_it = 0;
+ /* go through current modes checking for the new probed mode */
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ if (drm_mode_equal(pmode, mode)) {
+ found_it = 1;
+ /* if equal delete the probed mode */
+ mode->status = pmode->status;
+ /* Merge type bits together */
+ mode->type |= pmode->type;
+ list_del(&pmode->head);
+ drm_mode_destroy(connector->dev, pmode);
+ break;
+ }
+ }
+
+ if (!found_it) {
+ list_move_tail(&pmode->head, &connector->modes, (caddr_t)pmode);
+ }
+ }
+}
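+
+/**
+ * drm_mode_create_from_cmdline_mode - create a mode from a cmdline spec
+ * @dev: DRM device to create the new mode for
+ * @cmd: parsed command line mode specification
+ *
+ * LOCKING:
+ * None.
+ *
+ * Generate a CVT mode (with reduced blanking if requested) or a GTF mode
+ * from @cmd, defaulting to a 60Hz refresh rate when none was specified,
+ * and set up its CRTC timings.
+ */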
+struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+{
+ struct drm_display_mode *mode;
+
+ if (cmd->cvt)
+ mode = drm_cvt_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->rb, cmd->interlace,
+ cmd->margins);
+ else
+ mode = drm_gtf_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->interlace,
+ cmd->margins);
+ if (!mode)
+ return NULL;
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ return mode;
+}
diff --git a/usr/src/uts/common/io/drm/drm_msg.c b/usr/src/uts/common/io/drm/drm_msg.c
index 120776c..691a77b 100644
--- a/usr/src/uts/common/io/drm/drm_msg.c
+++ b/usr/src/uts/common/io/drm/drm_msg.c
@@ -1,34 +1,47 @@
/*
- * CDDL HEADER START
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
void
+drm_debug_print(int cmn_err, const char *func, int line, const char *fmt, ...)
+{
+ va_list ap;
+ char format[256];
+
+ (void) snprintf(format, sizeof (format), "[drm:%s:%d] %s",
+ func, line, fmt);
+
+ va_start(ap, fmt);
+ vcmn_err(cmn_err, format, ap);
+ va_end(ap);
+}
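+
+/*
+ * For example (hypothetical values), a call such as
+ *
+ *	drm_debug_print(CE_CONT, "drm_irq_install", 142, "irq=%d\n", 12);
+ *
+ * builds the format "[drm:drm_irq_install:142] irq=%d\n" and hands it,
+ * with its argument, to vcmn_err().
+ */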
+
+void
drm_debug(const char *fmt, ...)
{
va_list ap;
diff --git a/usr/src/uts/common/io/drm/drm_pci.c b/usr/src/uts/common/io/drm/drm_pci.c
index 37a02a1..530ea35 100644
--- a/usr/src/uts/common/io/drm/drm_pci.c
+++ b/usr/src/uts/common/io/drm/drm_pci.c
@@ -1,9 +1,11 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
-/* BEGIN CSTYLED */
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory functions.
@@ -37,91 +39,18 @@
/**********************************************************************/
/** \name PCI memory */
/*@{*/
-/* END CSTYLED */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
#include "drmP.h"
#include <vm/seg_kmem.h>
-#define PCI_DEVICE(x) (((x)>>11) & 0x1f)
-#define PCI_FUNCTION(x) (((x) & 0x700) >> 8)
-#define PCI_BUS(x) (((x) & 0xff0000) >> 16)
-
typedef struct drm_pci_resource {
- uint_t regnum;
+ uint_t regnum;
unsigned long offset;
unsigned long size;
} drm_pci_resource_t;
-int
-pci_get_info(drm_device_t *softstate, int *bus, int *slot, int *func)
-{
- int *regs_list;
- uint_t nregs = 0;
-
- if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, softstate->dip,
- DDI_PROP_DONTPASS, "reg", (int **)&regs_list, &nregs)
- != DDI_PROP_SUCCESS) {
- DRM_ERROR("pci_get_info: get pci function bus device failed");
- goto error;
- }
- *bus = (int)PCI_BUS(regs_list[0]);
- *slot = (int)PCI_DEVICE(regs_list[0]);
- *func = (int)PCI_FUNCTION(regs_list[0]);
-
- if (nregs > 0) {
- ddi_prop_free(regs_list);
- }
- return (DDI_SUCCESS);
-error:
- if (nregs > 0) {
- ddi_prop_free(regs_list);
- }
- return (DDI_FAILURE);
-}
-
-int
-pci_get_irq(drm_device_t *statep)
-{
- int irq;
-
- extern int drm_supp_get_irq(void *);
-
- irq = ddi_prop_get_int(DDI_DEV_T_ANY,
- statep->dip, DDI_PROP_DONTPASS, "interrupts", -1);
-
- if (irq > 0) {
- irq = drm_supp_get_irq(statep->drm_handle);
- }
-
- return (irq);
-}
-
-int
-pci_get_vendor(drm_device_t *statep)
-{
- int vendorid;
-
- vendorid = ddi_prop_get_int(DDI_DEV_T_ANY,
- statep->dip, DDI_PROP_DONTPASS, "vendor-id", 0);
-
- return (vendorid);
-}
-
-int
-pci_get_device(drm_device_t *statep)
-{
- int deviceid;
-
- deviceid = ddi_prop_get_int(DDI_DEV_T_ANY,
- statep->dip, DDI_PROP_DONTPASS, "device-id", 0);
-
- return (deviceid);
-}
-
void
-drm_core_ioremap(struct drm_local_map *map, drm_device_t *dev)
+drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
if ((map->type == _DRM_AGP) && dev->agp) {
/*
@@ -137,8 +66,8 @@ drm_core_ioremap(struct drm_local_map *map, drm_device_t *dev)
* After that, access to physical memory managed by agp gart
* hardware in kernel space doesn't go through agp hardware,
* it will be: kernel virtual ---> physical address.
- * Obviously, it is more efficient. But in solaris operating
- * system, the ioctl AGPIOC_ALLOCATE of apggart driver does
+ * Obviously, it is more efficient. But in Solaris operating
+ * system, the ioctl AGPIOC_ALLOCATE of agpgart driver does
* not return physical address. We are unable to create the
* direct mapping between kernel space and agp memory. So,
* we remove the calling to agp_remap().
@@ -150,10 +79,11 @@ drm_core_ioremap(struct drm_local_map *map, drm_device_t *dev)
}
}
-/*ARGSUSED*/
void
-drm_core_ioremapfree(struct drm_local_map *map, drm_device_t *dev)
+drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
+ _NOTE(ARGUNUSED(dev))
+
if (map->type != _DRM_AGP) {
if (map->handle && map->size)
drm_ioremapfree(map);
@@ -167,26 +97,6 @@ drm_core_ioremapfree(struct drm_local_map *map, drm_device_t *dev)
}
}
-struct drm_local_map *
-drm_core_findmap(drm_device_t *dev, unsigned long handle)
-{
- drm_local_map_t *map;
-
- DRM_SPINLOCK_ASSERT(&dev->dev_lock);
-
-/*
- * For the time being, we compare the low 32 bit only,
- * We will hash handle to 32-bit to solve this issue later.
- */
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if ((((unsigned long)map->handle) & 0x00000000ffffffff)
- == (handle & 0x00000000ffffffff))
- return (map);
- }
-
- return (NULL);
-}
-
/*
* pci_alloc_consistent()
*/
@@ -202,54 +112,50 @@ static ddi_dma_attr_t hw_dma_attr = {
0xffffffff, /* seg */
1, /* sgllen */
4, /* granular */
- 0 /* flags */
+ DDI_DMA_FLAGERR, /* flags */
};
static ddi_device_acc_attr_t hw_acc_attr = {
DDI_DEVICE_ATTR_V0,
DDI_NEVERSWAP_ACC,
- DDI_STRICTORDER_ACC
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
};
-
void *
-drm_pci_alloc(drm_device_t *dev, size_t size,
- size_t align, dma_addr_t maxaddr, int segments)
+drm_pci_alloc(struct drm_device *dev, size_t size,
+ size_t align, dma_addr_t maxaddr, int segments)
{
- drm_dma_handle_t *dmah;
- uint_t count;
- int ret = DDI_FAILURE;
+ struct drm_dma_handle *dmah;
+ uint_t count;
- /* allocat continous physical memory for hw status page */
- if (align == 0)
- hw_dma_attr.dma_attr_align = 1;
- else
- hw_dma_attr.dma_attr_align = align;
+ /* allocate contiguous physical memory for hw status page */
+ hw_dma_attr.dma_attr_align = (!align) ? 1 : align;
hw_dma_attr.dma_attr_addr_hi = maxaddr;
hw_dma_attr.dma_attr_sgllen = segments;
- dmah = kmem_zalloc(sizeof (drm_dma_handle_t), KM_SLEEP);
- if (ret = ddi_dma_alloc_handle(dev->dip, &hw_dma_attr,
- DDI_DMA_SLEEP, NULL, &dmah->dma_hdl)) {
- DRM_ERROR("drm_pci_alloc:ddi_dma_alloc_handle failed\n");
+ dmah = kmem_zalloc(sizeof(struct drm_dma_handle), KM_SLEEP);
+
+ if (ddi_dma_alloc_handle(dev->devinfo, &hw_dma_attr,
+ DDI_DMA_SLEEP, NULL, &dmah->dma_hdl) != DDI_SUCCESS) {
+ DRM_ERROR("ddi_dma_alloc_handle() failed");
goto err3;
}
- if (ret = ddi_dma_mem_alloc(dmah->dma_hdl, size, &hw_acc_attr,
+ if (ddi_dma_mem_alloc(dmah->dma_hdl, size, &hw_acc_attr,
DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
DDI_DMA_SLEEP, NULL, (caddr_t *)&dmah->vaddr,
- &dmah->real_sz, &dmah->acc_hdl)) {
- DRM_ERROR("drm_pci_alloc: ddi_dma_mem_alloc failed\n");
+ &dmah->real_sz, &dmah->acc_hdl) != DDI_SUCCESS) {
+ DRM_ERROR("ddi_dma_mem_alloc() failed\n");
goto err2;
}
- ret = ddi_dma_addr_bind_handle(dmah->dma_hdl, NULL,
+ if (ddi_dma_addr_bind_handle(dmah->dma_hdl, NULL,
(caddr_t)dmah->vaddr, dmah->real_sz,
- DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
- DDI_DMA_SLEEP, NULL, &dmah->cookie, &count);
- if (ret != DDI_DMA_MAPPED) {
- DRM_ERROR("drm_pci_alloc: alloc phys memory failed");
+ DDI_DMA_RDWR|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
+ NULL, &dmah->cookie, &count) != DDI_DMA_MAPPED) {
+ DRM_ERROR("ddi_dma_addr_bind_handle() failed");
goto err1;
}
@@ -273,14 +179,11 @@ err3:
return (NULL);
}
-/*
- * pci_free_consistent()
- */
-/*ARGSUSED*/
void
-drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
+drm_pci_free(drm_dma_handle_t *dmah)
{
ASSERT(dmah != NULL);
+
(void) ddi_dma_unbind_handle(dmah->dma_hdl);
ddi_dma_mem_free(&dmah->acc_hdl);
ddi_dma_free_handle(&dmah->dma_hdl);
@@ -288,13 +191,13 @@ drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
}
int
-do_get_pci_res(drm_device_t *dev, drm_pci_resource_t *resp)
+do_get_pci_res(struct drm_device *dev, drm_pci_resource_t *resp)
{
int length;
pci_regspec_t *regs;
if (ddi_getlongprop(
- DDI_DEV_T_ANY, dev->dip, DDI_PROP_DONTPASS,
+ DDI_DEV_T_ANY, dev->devinfo, DDI_PROP_DONTPASS,
"assigned-addresses", (caddr_t)&regs, &length) !=
DDI_PROP_SUCCESS) {
DRM_ERROR("do_get_pci_res: ddi_getlongprop failed!\n");
@@ -309,16 +212,15 @@ do_get_pci_res(drm_device_t *dev, drm_pci_resource_t *resp)
return (0);
}
-/*ARGSUSED*/
unsigned long
-drm_get_resource_start(drm_device_t *softstate, unsigned int regnum)
+drm_get_resource_start(struct drm_device *dev, unsigned int regnum)
{
drm_pci_resource_t res;
int ret;
res.regnum = regnum;
- ret = do_get_pci_res(softstate, &res);
+ ret = do_get_pci_res(dev, &res);
if (ret != 0) {
DRM_ERROR("drm_get_resource_start: ioctl failed");
@@ -329,9 +231,8 @@ drm_get_resource_start(drm_device_t *softstate, unsigned int regnum)
}
-/*ARGSUSED*/
unsigned long
-drm_get_resource_len(drm_device_t *softstate, unsigned int regnum)
+drm_get_resource_len(struct drm_device *softstate, unsigned int regnum)
{
drm_pci_resource_t res;
int ret;
@@ -347,3 +248,4 @@ drm_get_resource_len(drm_device_t *softstate, unsigned int regnum)
return (res.size);
}
+
diff --git a/usr/src/uts/common/io/drm/drm_rect.c b/usr/src/uts/common/io/drm/drm_rect.c
new file mode 100644
index 0000000..2935f3f
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_rect.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_rect.h"
+
+/**
+ * drm_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * Calculate the intersection of rectangles @r1 and @r2.
+ * @r1 will be overwritten with the intersection.
+ *
+ * RETURNS:
+ * %true if rectangle @r1 is still visible after the operation,
+ * %false otherwise.
+ */
+int drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
+{
+ r1->x1 = max(r1->x1, r2->x1);
+ r1->y1 = max(r1->y1, r2->y1);
+ r1->x2 = min(r1->x2, r2->x2);
+ r1->y2 = min(r1->y2, r2->y2);
+
+ return drm_rect_visible(r1);
+}
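+
+/*
+ * Worked example: intersecting r1 = (0, 0)..(100, 100) with
+ * r2 = (50, 50)..(150, 150) shrinks r1 to (50, 50)..(100, 100), which is
+ * still visible, so the function returns true; with a disjoint @r2 the
+ * result is empty and it returns false.
+ */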
+
+/**
+ * drm_rect_clip_scaled - perform a scaled clip operation
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @clip: clip rectangle
+ * @hscale: horizontal scaling factor
+ * @vscale: vertical scaling factor
+ *
+ * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
+ * same amounts multiplied by @hscale and @vscale.
+ *
+ * RETURNS:
+ * %true if rectangle @dst is still visible after being clipped,
+ * %false otherwise
+ */
+int drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+ const struct drm_rect *clip,
+ int hscale, int vscale)
+{
+ int diff;
+
+ diff = clip->x1 - dst->x1;
+ if (diff > 0) {
+ int64_t tmp = src->x1 + (int64_t) diff * hscale;
+ src->x1 = clamp_int64_t(tmp);
+ }
+ diff = clip->y1 - dst->y1;
+ if (diff > 0) {
+ int64_t tmp = src->y1 + (int64_t) diff * vscale;
+ src->y1 = clamp_int64_t(tmp);
+ }
+ diff = dst->x2 - clip->x2;
+ if (diff > 0) {
+ int64_t tmp = src->x2 - (int64_t) diff * hscale;
+ src->x2 = clamp_int64_t(tmp);
+ }
+ diff = dst->y2 - clip->y2;
+ if (diff > 0) {
+ int64_t tmp = src->y2 - (int64_t) diff * vscale;
+ src->y2 = clamp_int64_t(tmp);
+ }
+
+ return drm_rect_intersect(dst, clip);
+}
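+
+/*
+ * Worked example: with dst = (0, 0)..(100, 100), clip = (10, 0)..(100, 100)
+ * and hscale = 2, the destination loses 10 pixels on the left, so src->x1
+ * advances by 10 * 2 = 20 before @dst is intersected with @clip.
+ */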
+
+static int drm_calc_scale(int src, int dst)
+{
+ int scale = 0;
+
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+
+ if (dst == 0)
+ return 0;
+
+ scale = src / dst;
+
+ return scale;
+}
+
+/**
+ * drm_rect_calc_hscale - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * RETURNS:
+ * The horizontal scaling factor, or a negative errno if out of limits.
+ */
+int drm_rect_calc_hscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_hscale, int max_hscale)
+{
+ int src_w = drm_rect_width(src);
+ int dst_w = drm_rect_width(dst);
+ int hscale = drm_calc_scale(src_w, dst_w);
+
+ if (hscale < 0 || dst_w == 0)
+ return hscale;
+
+ if (hscale < min_hscale || hscale > max_hscale)
+ return -ERANGE;
+
+ return hscale;
+}
+
+/**
+ * drm_rect_calc_vscale - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * RETURNS:
+ * The vertical scaling factor, or a negative errno if out of limits.
+ */
+int drm_rect_calc_vscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_vscale, int max_vscale)
+{
+ int src_h = drm_rect_height(src);
+ int dst_h = drm_rect_height(dst);
+ int vscale = drm_calc_scale(src_h, dst_h);
+
+ if (vscale < 0 || dst_h == 0)
+ return vscale;
+
+ if (vscale < min_vscale || vscale > max_vscale)
+ return -ERANGE;
+
+ return vscale;
+}
+
+/**
+ * drm_rect_calc_hscale_relaxed - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * If the calculated scaling factor is below @min_hscale,
+ * decrease the width of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_hscale,
+ * decrease the width of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The horizontal scaling factor.
+ */
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_hscale, int max_hscale)
+{
+ int src_w = drm_rect_width(src);
+ int dst_w = drm_rect_width(dst);
+ int hscale = drm_calc_scale(src_w, dst_w);
+
+ if (hscale < 0 || dst_w == 0)
+ return hscale;
+
+ if (hscale < min_hscale) {
+ int max_dst_w = src_w / min_hscale;
+
+ drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
+
+ return min_hscale;
+ }
+
+ if (hscale > max_hscale) {
+ int max_src_w = dst_w * max_hscale;
+
+ drm_rect_adjust_size(src, max_src_w - src_w, 0);
+
+ return max_hscale;
+ }
+
+ return hscale;
+}
+
+/**
+ * drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * If the calculated scaling factor is below @min_vscale,
+ * decrease the height of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_vscale,
+ * decrease the height of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The vertical scaling factor.
+ */
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_vscale, int max_vscale)
+{
+ int src_h = drm_rect_height(src);
+ int dst_h = drm_rect_height(dst);
+ int vscale = drm_calc_scale(src_h, dst_h);
+
+ if (vscale < 0 || dst_h == 0)
+ return vscale;
+
+ if (vscale < min_vscale) {
+ int max_dst_h = src_h / min_vscale;
+
+ drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
+
+ return min_vscale;
+ }
+
+ if (vscale > max_vscale) {
+ int max_src_h = dst_h * max_vscale;
+
+ drm_rect_adjust_size(src, 0, max_src_h - src_h);
+
+ return max_vscale;
+ }
+
+ return vscale;
+}
+
+/**
+ * drm_rect_debug_print - print the rectangle information
+ * @r: rectangle to print
+ * @fixed_point: rectangle is in 16.16 fixed point format
+ */
+void drm_rect_debug_print(const struct drm_rect *r, int fixed_point)
+{
+ int w = drm_rect_width(r);
+ int h = drm_rect_height(r);
+
+ if (fixed_point)
+ DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
+ w >> 16, ((w & 0xffff) * 15625) >> 10,
+ h >> 16, ((h & 0xffff) * 15625) >> 10,
+ r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
+ r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+ else
+ DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
+}
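+
+/*
+ * The fractional parts above are converted from 1/65536ths to millionths:
+ * 1000000 / 65536 == 15625 / 1024 exactly, so ((x & 0xffff) * 15625) >> 10
+ * prints the 16-bit fraction as a six-digit decimal.
+ */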
diff --git a/usr/src/uts/common/io/drm/drm_scatter.c b/usr/src/uts/common/io/drm/drm_scatter.c
index b1d1076..50fc361 100644
--- a/usr/src/uts/common/io/drm/drm_scatter.c
+++ b/usr/src/uts/common/io/drm/drm_scatter.c
@@ -1,13 +1,21 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*/
-/* BEGIN CSTYLED */
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_scatter.c
+ * IOCTLs to manage scatter/gather memory
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
-/* drm_scatter.h -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com */
-/*-
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
@@ -29,32 +37,16 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- * Eric Anholt <anholt@FreeBSD.org>
- *
*/
-/* END CSTYLED */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
#include "drmP.h"
-#include <gfx_private.h>
#include "drm_io32.h"
-#define DEBUG_SCATTER 0
+#define DEBUG_SCATTER 0
-#ifdef _LP64
-#define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-#define ScatterHandle(x) (unsigned int)(x)
-#endif
-
-void
-drm_sg_cleanup(drm_device_t *dev, drm_sg_mem_t *entry)
+void drm_sg_cleanup(struct drm_sg_mem *entry)
{
- int pages = entry->pages;
+ int pages = entry->pages;
if (entry->busaddr) {
kmem_free(entry->busaddr, sizeof (*entry->busaddr) * pages);
@@ -64,52 +56,46 @@ drm_sg_cleanup(drm_device_t *dev, drm_sg_mem_t *entry)
ASSERT(entry->umem_cookie == NULL);
if (entry->dmah_sg) {
- drm_pci_free(dev, entry->dmah_sg);
+ drm_pci_free(entry->dmah_sg);
entry->dmah_sg = NULL;
}
if (entry->dmah_gart) {
- drm_pci_free(dev, entry->dmah_gart);
+ drm_pci_free(entry->dmah_gart);
entry->dmah_gart = NULL;
}
- if (entry) {
- drm_free(entry, sizeof (drm_sg_mem_t), DRM_MEM_SGLISTS);
- entry = NULL;
- }
+ kfree(entry, sizeof (struct drm_sg_mem));
}
-/*ARGSUSED*/
-int
-drm_sg_alloc(DRM_IOCTL_ARGS)
+#ifdef _LP64
+#define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+#define ScatterHandle(x) (unsigned int)(x)
+#endif
+
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
{
- DRM_DEVICE;
+ struct drm_sg_mem *entry;
unsigned long pages;
- drm_sg_mem_t *entry;
- drm_dma_handle_t *dmah;
- drm_scatter_gather_t request;
+ drm_dma_handle_t *dmah;
- DRM_DEBUG("%s\n", "drm_sg_alloc");
+ DRM_DEBUG("\n");
+
+ if (!drm_core_check_feature(dev, DRIVER_SG))
+ return -EINVAL;
if (dev->sg)
- return (EINVAL);
+ return -EINVAL;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_scatter_gather_32_t request32;
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
- DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
- sizeof (request32));
- request.size = request32.size;
- request.handle = request32.handle;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
- sizeof (request));
+ (void) memset(entry, 0, sizeof(*entry));
+ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+ DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
- pages = btopr(request.size);
- DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages);
- entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
entry->pages = (int)pages;
dmah = drm_pci_alloc(dev, ptob(pages), 4096, 0xfffffffful, pages);
if (dmah == NULL)
@@ -119,68 +105,45 @@ drm_sg_alloc(DRM_IOCTL_ARGS)
entry->handle = ScatterHandle((unsigned long)dmah->vaddr);
entry->virtual = (void *)dmah->vaddr;
- request.handle = entry->handle;
+ request->handle = entry->handle;
entry->dmah_sg = dmah;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_scatter_gather_32_t data32;
- data32.size = (uint32_t)request.size;
- data32.handle = (uint32_t)request.handle;
-
- DRM_COPYTO_WITH_RETURN((void *)data, &data32,
- sizeof (data32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &request,
- sizeof (request));
-
- DRM_LOCK();
- if (dev->sg) {
- DRM_UNLOCK();
- drm_sg_cleanup(dev, entry);
- return (EINVAL);
- }
dev->sg = entry;
- DRM_UNLOCK();
- return (0);
+ return 0;
err_exit:
- drm_sg_cleanup(dev, entry);
- return (ENOMEM);
+ drm_sg_cleanup(entry);
+ return -ENOMEM;
}
-/*ARGSUSED*/
-int
-drm_sg_free(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_scatter_gather_t request;
- drm_sg_mem_t *entry;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_scatter_gather_32_t request32;
-
- DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
- sizeof (request32));
- request.size = request32.size;
- request.handle = request32.handle;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
- sizeof (request));
+ struct drm_scatter_gather *request = data;
+
+ return drm_sg_alloc(dev, request);
+}
+
+/* LINTED */
+int drm_sg_free(DRM_IOCTL_ARGS)
+{
+ struct drm_scatter_gather *request = data;
+ struct drm_sg_mem *entry;
+
+ if (!drm_core_check_feature(dev, DRIVER_SG))
+ return -EINVAL;
- DRM_LOCK();
entry = dev->sg;
dev->sg = NULL;
- DRM_UNLOCK();
- if (!entry || entry->handle != request.handle)
- return (EINVAL);
+ if (!entry || entry->handle != request->handle)
+ return -EINVAL;
+
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
- drm_sg_cleanup(dev, entry);
+ drm_sg_cleanup(entry);
- return (0);
+ return 0;
}
diff --git a/usr/src/uts/common/io/drm/drm_stub.c b/usr/src/uts/common/io/drm/drm_stub.c
index ec82b0a..26501ac 100644
--- a/usr/src/uts/common/io/drm/drm_stub.c
+++ b/usr/src/uts/common/io/drm/drm_stub.c
@@ -353,6 +353,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
err_g2:
+err_mem:
kfree(new_minor, sizeof (*new_minor));
err_idr:
(void) idr_remove(&drm_minors_idr, minor_id);
@@ -489,6 +490,8 @@ void drm_put_dev(struct drm_device *dev)
if (dev->driver->unload)
dev->driver->unload(dev);
+ gfxp_mempool_destroy();
+
if (drm_core_has_AGP(dev) && dev->agp) {
drm_agp_cleanup(dev);
kfree(dev->agp, sizeof(*dev->agp));
diff --git a/usr/src/uts/common/io/drm/drm_sun_i2c.c b/usr/src/uts/common/io/drm/drm_sun_i2c.c
new file mode 100644
index 0000000..21b58e7
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_i2c.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drmP.h"
+
+#define mutex_lock_nested(a,b) mutex_enter(a)
+
+#define I2C_HI_CLOCK(adap, ret) \
+do { \
+ ret = i2c_setscl(adap); \
+ if (ret) { \
+ /* Other master keeps the clock low. */ \
+ /* Free the bus. */ \
+ i2c_setsda(adap); \
+ i2c_udelay(adap); \
+ return ret; \
+ } \
+} while (0)
+
+static inline void
+i2c_udelay(struct i2c_adapter *adap)
+{
+ udelay((adap->udelay + 1) >> 1);
+}
+
+static inline int
+i2c_getsda(struct i2c_adapter *adap)
+{
+ return adap->getsda(adap->data) ? 1 : 0;
+}
+
+static inline void
+i2c_clrsda(struct i2c_adapter *adap)
+{
+ adap->setsda(adap->data, 0);
+}
+
+static inline void
+i2c_setsda(struct i2c_adapter *adap)
+{
+ adap->setsda(adap->data, 1);
+}
+
+static inline int
+i2c_getscl(struct i2c_adapter *adap)
+{
+ return adap->getscl(adap->data) ? 1 : 0;
+}
+
+static inline void
+i2c_clrscl(struct i2c_adapter *adap)
+{
+ adap->setscl(adap->data, 0);
+}
+
+static int
+i2c_setscl(struct i2c_adapter *adap)
+{
+ clock_t start;
+
+ adap->setscl(adap->data, 1);
+
+ /* Clock Synchronization */
+ start = ddi_get_lbolt();
+ while (!i2c_getscl(adap)) {
+ /* FIXME: Does ddi_get_lbolt() return negative
+ * value? If so, leave me.
+ */
+ if ((ddi_get_lbolt() - start) > adap->timeout)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int
+i2c_start(struct i2c_adapter *adap)
+{
+ int ret = 0;
+
+ /* Step 1: free the bus. */
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+ ret = i2c_setscl(adap);
+ if (ret) {
+ /* Other master keeps the clock low.
+ * The bus is busy.
+ */
+ return ret;
+ }
+ if (!i2c_getsda(adap)) {
+ /* The bus is busy. */
+ return -EBUSY;
+ }
+ i2c_udelay(adap);
+
+ /* Step 2: (S/Sr) condition. */
+ i2c_clrsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 3: free the clock. */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+
+ return 0;
+}
+
+static int
+i2c_stop(struct i2c_adapter *adap)
+{
+ int ret;
+
+ if (i2c_getscl(adap)) {
+ /* Stop() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ /* Step 1: Free the data */
+ i2c_clrsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: Hold the clock */
+ I2C_HI_CLOCK(adap, ret);
+ i2c_udelay(adap);
+
+ /* Step 3: (P) condition */
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+ if (!i2c_getsda(adap)) {
+ /* Other master keeps the data low.
+ * The bus is busy.
+ */
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+i2c_write_byte(struct i2c_adapter *adap, unsigned char c)
+{
+ int needARB = 0;
+ int ret = 0, i;
+
+ if (i2c_getscl(adap)) {
+ /* Write() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ for (i = 7; i >= 0; i--) {
+ /* Step 1: set data. */
+ if (c & (1 << i)) {
+ needARB = 1;
+ i2c_setsda(adap);
+ } else {
+ needARB = 0;
+ i2c_clrsda(adap);
+ }
+ i2c_udelay(adap);
+
+ /* Step 2: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ if (needARB && !i2c_getsda(adap)) {
+ /* Do arbitration: lose the bus. */
+ return -EBUSY;
+ }
+ /* Double delay. */
+ i2c_udelay(adap);
+ i2c_udelay(adap);
+ if (needARB && !i2c_getsda(adap)) {
+ /* Do arbitration: someone performs (S) condition. */
+ return -EBUSY;
+ }
+
+ /* Step 3: free the clock. */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+ }
+
+ return 0;
+}
+
+static int
+i2c_read_byte(struct i2c_adapter *adap, unsigned char *cp)
+{
+ int ret, r, i;
+
+ if (i2c_getscl(adap)) {
+ /* Read() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+
+ *cp = 0;
+ for (i = 7; i >= 0; i--) {
+ /* Step 1: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ r = i2c_getsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: read the data. */
+ if (r != i2c_getsda(adap)) {
+ /* Do arbitration: someone performs (S/Sr/P) condition. */
+ return -EBUSY;
+ }
+ if (r)
+ *cp |= (1 << i);
+ i2c_udelay(adap);
+
+ /* Step 3: free the clock */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+ }
+
+ return 0;
+}
+
+static int
+i2c_ack(struct i2c_adapter *adap)
+{
+ int ret;
+
+ if (i2c_getscl(adap)) {
+ /* Ack() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ /* Step 1: free the data. */
+ i2c_clrsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ i2c_udelay(adap);
+
+ /* Step 3: free the clock */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+
+ return 0;
+}
+
+static int
+i2c_no_ack(struct i2c_adapter *adap)
+{
+ int ret;
+
+ if (i2c_getscl(adap)) {
+ /* Nak() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ /* Step 1: hold the data. */
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ if (!i2c_getsda(adap)) {
+ /* Other master keeps the data low. */
+ return -EBUSY;
+ }
+ i2c_udelay(adap);
+ if (!i2c_getsda(adap)) {
+ /* Do arbitration: someone performs (S/Sr) condition. */
+ return -EBUSY;
+ }
+
+ /* Step 3: free the clock */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+
+ return 0;
+}
+
+static int
+i2c_wait_ack(struct i2c_adapter *adap)
+{
+ int ret;
+
+ if (i2c_getscl(adap)) {
+ /* Wack() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ /* Step 1: hold the data. */
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ i2c_udelay(adap);
+
+ /* Step 3: read the data. */
+ ret = i2c_getsda(adap) ? 0 : 1;
+
+ /* Step 4: free the clock */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+
+ return ret;
+}
+
+static int
+i2c_write_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ int i, ret;
+
+ for (i = 0; i < msg->len; i++) {
+ ret = i2c_write_byte(adap, msg->buf[i]);
+ if (ret)
+ return ret;
+
+ ret = i2c_wait_ack(adap);
+ if (ret == 1)
+ continue;
+ else if (ret == 0)
+ return -ENXIO;
+ else
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i2c_read_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ unsigned char c;
+ int i, ret;
+
+ for (i = 0; i < msg->len; i++) {
+ ret = i2c_read_byte(adap, &c);
+ if (ret)
+ return ret;
+
+ msg->buf[i] = c;
+
+ if (i < msg->len - 1)
+ ret = i2c_ack(adap);
+ else
+ ret = i2c_no_ack(adap);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i2c_address(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ unsigned char addr;
+ int ret;
+
+ addr = msg->addr << 1;
+ if (msg->flags & I2C_M_RD)
+ addr |= 1;
+
+ ret = i2c_write_byte(adap, addr);
+ if (ret)
+ return ret;
+
+ ret = i2c_wait_ack(adap);
+ if (ret == 1)
+ return 0;
+ else if (ret == 0)
+ return -ENXIO;
+
+ return ret;
+}
+
+static int
+i2c_do_transfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct i2c_msg *msg;
+ int i, ret = 0;
+
+ for (i = 0; i < num; i++) {
+ msg = &msgs[i];
+
+ if (!(i && (msg->flags & I2C_M_NOSTART))) {
+ ret = i2c_start(adap);
+ if (ret)
+ return ret;
+
+ ret = i2c_address(adap, msg);
+ if (ret)
+ return ret;
+ }
+
+ if (msg->flags & I2C_M_RD)
+ ret = i2c_read_msg(adap, msg);
+ else
+ ret = i2c_write_msg(adap, msg);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_stop(adap);
+ if (ret)
+ return ret;
+
+ return num;
+}
+
+struct i2c_algorithm i2c_bit_algo = {
+ .master_xfer = i2c_do_transfer,
+ .functionality = NULL,
+};
+
+int
+i2c_bit_add_bus(struct i2c_adapter *adap)
+{
+ if (!adap->setscl || !adap->getscl || !adap->setsda || !adap->getsda)
+ return -EINVAL;
+
+ adap->algo = (struct i2c_algorithm *)&i2c_bit_algo;
+ adap->retries = 3;
+
+ return 0;
+}
+
+int
+i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ clock_t start;
+ int i, ret = 0;
+
+ mutex_enter(&adap->bus_lock);
+ start = ddi_get_lbolt();
+ for (i = 0; i <= adap->retries; i++) {
+ ret = adap->algo->master_xfer(adap, msgs, num);
+		/* A non-negative return means the transfer completed
+		 * (master_xfer returns the message count on success);
+		 * a timeout is also final.  Retry only on other errors.
+		 */
+		if (ret >= 0 || ret == -ETIMEDOUT)
+			goto do_exit;
+
+		/* FIXME: can ddi_get_lbolt() return a negative value?
+		 * If so, this deadline check needs revisiting.
+		 */
+		if ((ddi_get_lbolt() - start) > adap->timeout)
+			break;
+ }
+
+do_exit:
+ mutex_exit(&adap->bus_lock);
+ return ret;
+}
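+
+/*
+ * Usage sketch (illustrative only, not part of this import): a combined
+ * write-then-read transfer, as a DDC/EDID fetch would issue it.  The
+ * adapter is assumed to have been initialized with i2c_bit_add_bus().
+ *
+ *	unsigned char off = 0;
+ *	unsigned char edid[128];
+ *	struct i2c_msg msgs[] = {
+ *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &off },
+ *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
+ *	};
+ *
+ *	if (i2c_transfer(adap, msgs, 2) != 2)
+ *		... handle the error ...
+ */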
diff --git a/usr/src/uts/common/io/drm/drm_sun_idr.c b/usr/src/uts/common/io/drm/drm_sun_idr.c
new file mode 100644
index 0000000..4b59878
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_idr.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <sys/kmem.h>
+#include "drmP.h"
+#include "drm_linux_list.h"
+#include "drm_sun_idr.h"
+
+static inline int
+fr_isfull(struct idr_free_id_range *range)
+{
+ return range->min_unused_id >= range->end;
+}
+
+static struct idr_free_id_range *
+fr_new(int start)
+{
+ struct idr_free_id_range *this;
+
+ this = kmem_zalloc(sizeof(struct idr_free_id_range), KM_SLEEP);
+ this->start = start;
+ this->end = 0x7fffffff;
+ this->min_unused_id = start;
+
+ return this;
+}
+
+static struct idr_free_id_range *
+fr_destroy(struct idr_free_id_range *range)
+{
+ struct idr_free_id_range *ret;
+ struct idr_free_id *id;
+
+ while (range->free_ids) {
+ id = range->free_ids;
+ range->free_ids = range->free_ids->next;
+ kmem_free(id, sizeof(struct idr_free_id));
+ }
+
+ ret = range->next;
+ kmem_free(range, sizeof(struct idr_free_id_range));
+
+ return ret;
+}
+
+static struct idr_free_id_range *
+fr_get(struct idr_free_id_range *list, int start)
+{
+ struct idr_free_id_range *entry = list;
+
+ while (entry && (entry->start != start))
+ entry = entry->next;
+ return entry;
+}
+
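+/*
+ * Worked example of the free-range bookkeeping below: starting from the
+ * single range [0, 0x7fffffff), fr_insert(list, 100) splits it into
+ * [0, 100) and [100, 0x7fffffff).  If the low range's min_unused_id had
+ * already advanced past 100, the high range inherits it (the low range's
+ * mark is clamped to 100) and any recycled ids >= 100 migrate from the
+ * low range's free list to the high one.
+ */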
+static struct idr_free_id_range *
+fr_insert(struct idr_free_id_range *list, int start)
+{
+ struct idr_free_id_range *prev = list;
+ struct idr_free_id_range *n;
+ struct idr_free_id *id, **pid;
+
+ while (prev->next && prev->next->start < start)
+ prev = prev->next;
+
+ n = fr_new(start);
+
+ /* link the new range */
+ n->next = prev->next;
+ prev->next = n;
+
+ /* change the end of the ranges */
+ prev->end = start;
+ if (n->next)
+ n->end = n->next->start;
+
+ if (fr_isfull(prev)) {
+ /* change the min_unused_id of the ranges */
+ n->min_unused_id = prev->min_unused_id;
+ prev->min_unused_id = start;
+
+ /* link free id to the new range */
+ pid = &prev->free_ids;
+ while (*pid) {
+ if ((*pid)->id < start) {
+ pid = &((*pid)->next);
+ continue;
+ }
+ id = *pid;
+
+ /* remove from the prev range */
+ (*pid) = id->next;
+
+ /* link to the new range */
+ id->next = n->free_ids;
+ n->free_ids = id;
+ }
+ }
+
+ return n;
+}
+
+static int
+idr_compare(const void *a, const void *b)
+{
+ const struct idr_used_id *pa = a;
+ const struct idr_used_id *pb = b;
+
+ if (pa->id < pb->id)
+ return (-1);
+ else if (pa->id > pb->id)
+ return (1);
+ else
+ return (0);
+}
+
+static int
+idr_get_free_id_in_range(struct idr_free_id_range *range)
+{
+ struct idr_free_id *idp;
+ int id;
+
+ if (range->free_ids) {
+ idp = range->free_ids;
+ range->free_ids = idp->next;
+ id = idp->id;
+ kmem_free(idp, sizeof(struct idr_free_id));
+ return (id);
+ }
+
+ if (!fr_isfull(range)) {
+ id = range->min_unused_id;
+ range->min_unused_id++;
+ return (id);
+ }
+
+ return (-1);
+}
+
+void
+idr_init(struct idr *idrp)
+{
+ avl_create(&idrp->used_ids, idr_compare, sizeof(struct idr_used_id),
+ offsetof(struct idr_used_id, link));
+
+ idrp->free_id_ranges = fr_new(0);
+ mutex_init(&idrp->lock, NULL, MUTEX_DRIVER, NULL);
+}
+
+int
+idr_get_new_above(struct idr *idrp, void *obj, int start, int *newid)
+{
+ struct idr_used_id *used;
+ struct idr_free_id_range *range;
+ int id;
+
+ if (start < 0)
+ return (-EINVAL);
+ mutex_enter(&idrp->lock);
+ range = fr_get(idrp->free_id_ranges, start);
+ if (!range)
+ range = fr_insert(idrp->free_id_ranges, start);
+
+ while (range) {
+ id = idr_get_free_id_in_range(range);
+ if (id >= 0)
+ goto got_id;
+ range = range->next;
+ }
+ mutex_exit(&idrp->lock);
+ return (-1);
+
+got_id:
+ used = kmem_alloc(sizeof(struct idr_used_id), KM_NOSLEEP);
+ if (!used) {
+ mutex_exit(&idrp->lock);
+ return (-ENOMEM);
+ }
+
+ used->id = id;
+ used->obj = obj;
+ avl_add(&idrp->used_ids, used);
+
+ *newid = id;
+ mutex_exit(&idrp->lock);
+ return (0);
+}
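+
+/*
+ * Note on the return convention above: idr_get_new_above() hands the new
+ * id back through *newid and returns 0 on success; it returns -EINVAL
+ * for a negative start, -ENOMEM when the tracking node cannot be
+ * allocated, and a bare -1 (not a -errno) when every range is exhausted.
+ */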
+
+static struct idr_used_id *
+idr_find_used_id(struct idr *idrp, uint32_t id)
+{
+ struct idr_used_id match;
+ struct idr_used_id *ret;
+
+ match.id = id;
+
+ ret = avl_find(&idrp->used_ids, &match, NULL);
+ if (ret) {
+ return (ret);
+ }
+
+ return (NULL);
+}
+
+void *
+idr_find(struct idr *idrp, uint32_t id)
+{
+ struct idr_used_id *ret;
+
+ mutex_enter(&idrp->lock);
+ ret = idr_find_used_id(idrp, id);
+ if (ret) {
+ mutex_exit(&idrp->lock);
+ return (ret->obj);
+ }
+
+ mutex_exit(&idrp->lock);
+ return (NULL);
+}
+
+int
+idr_remove(struct idr *idrp, uint32_t id)
+{
+ struct idr_free_id_range *range;
+ struct idr_used_id *ide;
+ struct idr_free_id *fid;
+
+ mutex_enter(&idrp->lock);
+ ide = idr_find_used_id(idrp, id);
+ if (!ide) {
+ mutex_exit(&idrp->lock);
+ return (-EINVAL);
+ }
+
+ fid = kmem_alloc(sizeof(struct idr_free_id), KM_NOSLEEP);
+ if (!fid) {
+ mutex_exit(&idrp->lock);
+ return (-ENOMEM);
+ }
+ fid->id = id;
+
+
+ range = idrp->free_id_ranges;
+ while (range->end <= id)
+ range = range->next;
+ fid->next = range->free_ids;
+ range->free_ids = fid;
+ avl_remove(&idrp->used_ids, ide);
+ kmem_free(ide, sizeof (struct idr_used_id));
+ mutex_exit(&idrp->lock);
+
+ return (0);
+}
+
+void
+idr_remove_all(struct idr *idrp)
+{
+ idr_destroy(idrp);
+ idr_init(idrp);
+}
+
+void *
+idr_replace(struct idr *idrp, void *obj, uint32_t id)
+{
+ struct idr_used_id *ide;
+ void *ret;
+ mutex_enter(&idrp->lock);
+ ide = idr_find_used_id(idrp, id);
+ if (!ide) {
+ mutex_exit(&idrp->lock);
+ return (void*)(-EINVAL);
+ }
+
+ ret = ide->obj;
+ ide->obj = obj;
+ mutex_exit(&idrp->lock);
+ return ret;
+}
+
+int
+idr_for_each(struct idr *idrp, int (*fn)(int id, void *p, void *data), void *data)
+{
+ struct idr_used_id *ide;
+ int ret = 0;
+
+ ide = avl_first(&idrp->used_ids);
+ while (ide) {
+ ret = fn(ide->id, ide->obj, data);
+ if (ret)
+ break;
+
+		/* NB: assumes fn did not remove the node; AVL_NEXT
+		 * must walk a live node.
+		 */
+ ide = AVL_NEXT(&idrp->used_ids, ide);
+ }
+
+ return ret;
+}
+
+int
+/* LINTED */
+idr_pre_get(struct idr *idrp, int flag)
+{
+ return (-1);
+}
+
+void
+idr_destroy(struct idr *idrp)
+{
+ struct idr_free_id_range *range;
+ struct idr_used_id *ide;
+ void *cookie = NULL;
+
+	while ((ide = avl_destroy_nodes(&idrp->used_ids, &cookie)) != NULL)
+ kmem_free(ide, sizeof (struct idr_used_id));
+ avl_destroy(&idrp->used_ids);
+
+ range = idrp->free_id_ranges;
+ while (range)
+ range = fr_destroy(range);
+ idrp->free_id_ranges = NULL;
+
+ mutex_destroy(&idrp->lock);
+}
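+
+/*
+ * Usage sketch for the idr interface (illustrative only; objp stands in
+ * for whatever object the caller tracks):
+ *
+ *	struct idr map;
+ *	int id;
+ *
+ *	idr_init(&map);
+ *	if (idr_get_new_above(&map, objp, 1, &id) == 0) {
+ *		ASSERT(idr_find(&map, id) == objp);
+ *		(void) idr_remove(&map, id);
+ *	}
+ *	idr_destroy(&map);
+ */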
+
+
+uint32_t fr_id = 0;
+uint32_t fr_time = 0;
+
+int
+/* LINTED */
+idr_list_pre_get(struct idr_list *head, int flag)
+{
+ return (-1);
+}
+
+void
+idr_list_init(struct idr_list *head)
+{
+ struct idr_list *entry;
+	/* hash table of list heads to speed up handle lookups */
+ entry = kmem_zalloc(DRM_GEM_OBJIDR_HASHNODE
+ * sizeof (struct idr_list), KM_SLEEP);
+ head->next = entry;
+ for (int i = 0; i < DRM_GEM_OBJIDR_HASHNODE; i++) {
+ INIT_LIST_HEAD(&entry[i]);
+ }
+}
+
+int
+idr_list_get_new_above(struct idr_list *head,
+ void *obj,
+ int *handlep)
+{
+ struct idr_list *entry, *node, *temp;
+ int key, id;
+ void *obj_temp;
+
+ ASSERT(fr_id <= 0x7fffffff);
+ id = ++fr_id;
+ if (id == 0x7fffffff) {
+ fr_time++;
+ id = fr_id = 1;
+ }
+ if (fr_time) {
+ /* find available id */
+ do {
+ obj_temp = idr_list_find(head, id);
+ } while ((obj_temp != NULL) && (++id < 0x7fffffff));
+ if (id < 0x7fffffff) {
+ fr_id = id;
+ } else {
+ fr_id = 0;
+ return (-EAGAIN);
+ }
+ }
+ entry = kmem_zalloc(sizeof (*entry), KM_NOSLEEP);
+ if (entry == NULL)
+ return (-1);
+ ASSERT(id <= 0x7fffffff);
+ key = id % DRM_GEM_OBJIDR_HASHNODE;
+ entry->obj = obj;
+ entry->handle = id;
+
+ /* list add */
+ node = &head->next[key];
+ temp = node->next;
+ entry->prev = node;
+ entry->next = node->next;
+ temp->prev = entry;
+ node->next = entry;
+
+ *handlep = id;
+ return (0);
+}
+
+void *
+idr_list_find(struct idr_list *head,
+ uint32_t name)
+{
+ struct idr_list *entry;
+ int key;
+ key = name % DRM_GEM_OBJIDR_HASHNODE;
+
+ list_for_each(entry, &head->next[key]) {
+ if (entry->handle == name)
+ return (entry->obj);
+ }
+ return (NULL);
+}
+
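+/*
+ * Unlink a node from its doubly linked hash chain.  The (*"\0") loop
+ * condition dereferences the empty string's NUL byte, i.e. a constant
+ * zero -- a do/while (0) spelled so that lint does not complain about
+ * a constant conditional.
+ */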
+#define list_del_idr_list_node(ptr) \
+do { \
+ struct idr_list *n_node = (ptr)->next; \
+ struct idr_list *p_node = (ptr)->prev; \
+ n_node->prev = (ptr)->prev; \
+ p_node->next = (ptr)->next; \
+ (ptr)->prev = NULL; \
+ (ptr)->next = NULL; \
+} while (*"\0")
+
+
+int
+idr_list_remove(struct idr_list *head,
+ uint32_t name)
+{
+ struct idr_list *entry, *temp;
+ int key;
+ key = name % DRM_GEM_OBJIDR_HASHNODE;
+
+ list_for_each_safe(entry, temp, &head->next[key]) {
+ if (entry->handle == name) {
+ list_del_idr_list_node(entry);
+ kmem_free(entry, sizeof (*entry));
+ return (0);
+ }
+ }
+	DRM_ERROR("Failed to remove the object %u", name);
+ return (-1);
+}
+
+void
+idr_list_free(struct idr_list *head)
+{
+ struct idr_list *entry, *temp;
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
+ list_for_each_safe(entry, temp, &head->next[key]) {
+ list_del_idr_list_node(entry);
+ kmem_free(entry, sizeof (*entry));
+ }
+ }
+ kmem_free(head->next,
+ DRM_GEM_OBJIDR_HASHNODE * sizeof (struct idr_list));
+ head->next = NULL;
+}
+
+int
+idr_list_empty(struct idr_list *head)
+{
+ int empty;
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
+ empty = list_empty(&(head)->next[key]);
+ if (!empty)
+ return (empty);
+ }
+ return (1);
+}
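+
+/*
+ * The idr_list_* interface above is the legacy GEM handle table: a fixed
+ * array of DRM_GEM_OBJIDR_HASHNODE doubly linked lists, hashed by
+ * (handle % DRM_GEM_OBJIDR_HASHNODE), with handles allocated from the
+ * global fr_id counter.
+ */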
diff --git a/usr/src/uts/common/io/drm/drm_sun_pci.c b/usr/src/uts/common/io/drm/drm_sun_pci.c
new file mode 100644
index 0000000..6bdc517
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_pci.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/visual_io.h>
+#include <sys/fbio.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/open.h>
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/kd.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/sunldi.h>
+#include <sys/mkdev.h>
+#include "drmP.h"
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpmaster_io.h>
+
+#define PCI_BUS(x) (((x) & 0xff0000) >> 16)
+#define PCI_SLOT(x) (((x)>>11) & 0x1f)
+#define PCI_FUNC(x) (((x) & 0x700) >> 8)
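+
+/*
+ * These macros decode the phys_hi word of a PCI "reg" property entry:
+ * bits 23:16 hold the bus number, bits 15:11 the device (slot), and
+ * bits 10:8 the function.
+ */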
+
+struct pci_dev *
+pci_dev_create(struct drm_device *dev)
+{
+ dev_info_t *dip = dev->devinfo;
+ pci_regspec_t *regspec;
+ struct pci_dev *pdev;
+ int *regs, ret, len, i;
+ uint_t nregs = 0;
+
+ pdev = kmem_zalloc(sizeof(struct pci_dev), KM_NOSLEEP);
+ if (!pdev)
+ return (NULL);
+
+ /* access handle */
+ ret = pci_config_setup(dip, &pdev->pci_cfg_acc_handle);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("pci_config_setup() failed");
+ goto err_setup;
+ }
+
+ /* XXX Fix domain number (alpha hoses) */
+ pdev->domain = 0;
+
+ /* bus, slot, func */
+ ret = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "reg", (int **)&regs, &nregs);
+ if (ret != DDI_PROP_SUCCESS) {
+ DRM_ERROR("ddi_prop_lookup_int_array() failed");
+ goto err_info;
+ }
+ pdev->bus = (int)PCI_BUS(regs[0]);
+ pdev->slot = (int)PCI_SLOT(regs[0]);
+ pdev->func = (int)PCI_FUNC(regs[0]);
+ ddi_prop_free(regs);
+
+ /* irq */
+ ret = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "interrupts", -1);
+ if (ret == -1) {
+ DRM_ERROR("ddi_prop_get_int() failed");
+ goto err_irq;
+ }
+ if (ret > 0)
+		pdev->irq = pci_config_get8(pdev->pci_cfg_acc_handle,
+		    PCI_CONF_ILINE);
+
+ if (ddi_intr_hilevel(dip, 0) != 0) {
+ DRM_ERROR("high-level interrupts are not supported");
+ goto err_irq;
+ }
+
+ if (ddi_get_iblock_cookie(dip, (uint_t)0,
+ &pdev->intr_block) != DDI_SUCCESS) {
+ DRM_ERROR("cannot get iblock cookie");
+ goto err_irq;
+ }
+
+ /* regions */
+ ret = ddi_getlongprop(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "assigned-addresses", (caddr_t)&regspec, &len);
+ if (ret != DDI_PROP_SUCCESS) {
+ DRM_ERROR("ddi_getlongprop() failed");
+ goto err_regions;
+ }
+ for (i = 0; i < PCI_CONFIG_REGION_NUMS; i++) {
+ pdev->regions[i].start =
+ (unsigned long)regspec[i].pci_phys_low;
+ pdev->regions[i].size =
+ (unsigned long)regspec[i].pci_size_low;
+ }
+ kmem_free(regspec, (size_t)len);
+
+ /* vendor, device */
+ pdev->vendor = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev->devinfo, DDI_PROP_DONTPASS, "vendor-id", 0);
+ pdev->device = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev->devinfo, DDI_PROP_DONTPASS, "device-id", 0);
+
+ pdev->dev = dev;
+ return pdev;
+
+err_regions:
+err_irq:
+err_info:
+ pci_config_teardown(&pdev->pci_cfg_acc_handle);
+err_setup:
+ kmem_free(pdev, sizeof(struct pci_dev));
+ return (NULL);
+}
+
+void pci_dev_destroy(struct pci_dev *pdev)
+{
+ pci_config_teardown(&pdev->pci_cfg_acc_handle);
+ kmem_free(pdev, sizeof(struct pci_dev));
+}
+
+void pci_read_config_byte(struct pci_dev *pdev, int where, u8 *val)
+{
+ *val = pci_config_get8(pdev->pci_cfg_acc_handle, where);
+}
+
+void pci_read_config_word(struct pci_dev *pdev, int where, u16 *val)
+{
+ *val = pci_config_get16(pdev->pci_cfg_acc_handle, where);
+}
+
+void pci_read_config_dword(struct pci_dev *pdev, int where, u32 *val)
+{
+ *val = pci_config_get32(pdev->pci_cfg_acc_handle, where);
+}
+
+void pci_write_config_byte(struct pci_dev *pdev, int where, u8 val)
+{
+ pci_config_put8(pdev->pci_cfg_acc_handle, where, val);
+}
+
+void pci_write_config_word(struct pci_dev *pdev, int where, u16 val)
+{
+ pci_config_put16(pdev->pci_cfg_acc_handle, where, val);
+}
+
+void pci_write_config_dword(struct pci_dev *pdev, int where, u32 val)
+{
+ pci_config_put32(pdev->pci_cfg_acc_handle, where, val);
+}
+
+/* LINTED */
+u8 *pci_map_rom(struct pci_dev *pdev, size_t *size)
+{
+ u32 base;
+
+	/* Map the 128K legacy video BIOS shadow area. */
+	base = 0xC0000;
+	*size = 0x20000;
+
+ return (u8*)drm_sun_ioremap(base, *size, DRM_MEM_UNCACHED);
+}
+
+/* LINTED */
+void pci_unmap_rom(struct pci_dev *pdev, u8 *base)
+{
+ iounmap(base);
+}
+
+int pci_find_capability(struct pci_dev *pdev, int capid)
+{
+ uint8_t cap = 0;
+ uint16_t caps_ptr;
+
+	/* does the device have a capabilities list? */
+ if ((pci_config_get16(pdev->pci_cfg_acc_handle,
+ PCI_CONF_STAT) & PCI_CONF_CAP_MASK) == 0)
+ return (0);
+
+ caps_ptr = pci_config_get8(
+ pdev->pci_cfg_acc_handle, PCI_CONF_CAP_PTR);
+ while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
+ cap = pci_config_get32(
+ pdev->pci_cfg_acc_handle, caps_ptr);
+ if ((cap & PCI_CONF_CAPID_MASK) == capid)
+ return (cap);
+ caps_ptr = pci_config_get8(
+ pdev->pci_cfg_acc_handle,
+ caps_ptr + PCI_CAP_NEXT_PTR);
+ }
+
+ return (0);
+}
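+
+/*
+ * Example (illustrative): pci_find_capability(pdev, 0x02), where 0x02
+ * is the AGP capability id, returns the matching capability id byte
+ * when the device advertises it, or 0 when there is no capability list
+ * or no match.  Note that unlike the Linux function of the same name,
+ * this returns the capability id rather than its config-space offset.
+ */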
diff --git a/usr/src/uts/common/io/drm/drm_sun_timer.c b/usr/src/uts/common/io/drm/drm_sun_timer.c
new file mode 100644
index 0000000..b8039c7
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_timer.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drm_sun_timer.h"
+
+void
+init_timer(struct timer_list *timer)
+{
+ mutex_init(&timer->lock, NULL, MUTEX_DRIVER, NULL);
+}
+
+void
+destroy_timer(struct timer_list *timer)
+{
+ mutex_destroy(&timer->lock);
+}
+
+void
+setup_timer(struct timer_list *timer, void (*func)(void *), void *arg)
+{
+ timer->func = func;
+ timer->arg = arg;
+ timer->expired_time = 0;
+}
+
+void
+mod_timer(struct timer_list *timer, clock_t expires)
+{
+ mutex_enter(&timer->lock);
+	/*
+	 * If the timer is already being updated, let the previous caller
+	 * do the work.  Note that we must drop timer->lock before the
+	 * untimeout(), as the callback might call mod_timer() and
+	 * deadlock; the guard is therefore built on overloading the
+	 * timer->expires field.
+	 */
+ if (timer->expires == -1) {
+ mutex_exit(&timer->lock);
+ return;
+ }
+ timer->expires = -1;
+ mutex_exit(&timer->lock);
+
+ (void) untimeout(timer->timer_id);
+ timer->expired_time = jiffies + expires;
+ timer->timer_id = timeout(timer->func, timer->arg, expires);
+
+ mutex_enter(&timer->lock);
+ timer->expires = 0;
+ mutex_exit(&timer->lock);
+}
+
+void
+del_timer(struct timer_list *timer)
+{
+ (void) untimeout(timer->timer_id);
+}
+
+void
+test_set_timer(struct timer_list *timer, clock_t expires)
+{
+ if (time_after(jiffies, timer->expired_time)) {
+ timer->expired_time = jiffies + expires;
+ timer->timer_id = timeout(timer->func, timer->arg, expires);
+ }
+}
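+
+/*
+ * Usage sketch (illustrative only; my_callback and my_arg are
+ * hypothetical): arm a one-shot timer roughly 100ms out.
+ *
+ *	struct timer_list t;
+ *
+ *	init_timer(&t);
+ *	setup_timer(&t, my_callback, my_arg);
+ *	mod_timer(&t, drv_usectohz(100000));
+ *	...
+ *	del_timer(&t);
+ *	destroy_timer(&t);
+ */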
+
diff --git a/usr/src/uts/common/io/drm/drm_sun_workqueue.c b/usr/src/uts/common/io/drm/drm_sun_workqueue.c
new file mode 100644
index 0000000..8a5ba24
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_workqueue.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <sys/sunddi.h>
+#include <sys/types.h>
+
+#include "drm_sun_workqueue.h"
+
+int
+__queue_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+ int ret;
+
+ ASSERT(wq->taskq != NULL);
+ ASSERT(work->func != NULL);
+	/*
+	 * ddi_taskq_dispatch() can fail if there are not enough memory
+	 * resources.  Since we request a DDI_SLEEP dispatch, failure
+	 * should be very rare in practice.
+	 */
+ if ((ret = ddi_taskq_dispatch(wq->taskq, work->func, work, DDI_SLEEP))
+ == DDI_FAILURE)
+ cmn_err(CE_WARN, "queue_work: ddi_taskq_dispatch failure");
+ return (ret);
+}
+
+void
+init_work(struct work_struct *work, void (*func)(void *))
+{
+ work->func = func;
+}
+
+struct workqueue_struct *
+create_workqueue(dev_info_t *dip, char *name)
+{
+ struct workqueue_struct *wq;
+
+ wq = kmem_zalloc(sizeof (struct workqueue_struct), KM_SLEEP);
+ wq->taskq = ddi_taskq_create(dip, name, 1, TASKQ_DEFAULTPRI, 0);
+ if (wq->taskq == NULL)
+ goto fail;
+ wq->name = name;
+
+ return wq;
+
+fail:
+ kmem_free(wq, sizeof (struct workqueue_struct));
+ return (NULL);
+}
+
+void
+destroy_workqueue(struct workqueue_struct *wq)
+{
+ if (wq) {
+ ddi_taskq_destroy(wq->taskq);
+ kmem_free(wq, sizeof (struct workqueue_struct));
+ }
+}
+
+void
+cancel_delayed_work(struct workqueue_struct *wq)
+{
+ ddi_taskq_wait(wq->taskq);
+}
+
+void
+flush_workqueue(struct workqueue_struct *wq)
+{
+ ddi_taskq_wait(wq->taskq);
+}
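+
+/*
+ * Usage sketch (illustrative only; my_work_fn is hypothetical):
+ * dispatch one work item on a single-thread taskq-backed queue.
+ *
+ *	struct workqueue_struct *wq;
+ *	struct work_struct work;
+ *
+ *	wq = create_workqueue(dip, "mywq");
+ *	init_work(&work, my_work_fn);
+ *	(void) __queue_work(wq, &work);
+ *	flush_workqueue(wq);
+ *	destroy_workqueue(wq);
+ */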
diff --git a/usr/src/uts/common/io/drm/drm_sunmod.c b/usr/src/uts/common/io/drm/drm_sunmod.c
index 2f69229..24971ba 100644
--- a/usr/src/uts/common/io/drm/drm_sunmod.c
+++ b/usr/src/uts/common/io/drm/drm_sunmod.c
@@ -1,27 +1,28 @@
/*
- * CDDL HEADER START
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
*/
/*
@@ -34,288 +35,427 @@
*/
#include "drm_sunmod.h"
+#include "drm_sun_idr.h"
#include <sys/modctl.h>
#include <sys/kmem.h>
#include <vm/seg_kmem.h>
-static struct modlmisc modlmisc = {
- &mod_miscops, "DRM common interfaces"
+int drm_debug_flag = 0;
+int mdb_track_enable = B_FALSE;
+
+/* Identifier of this driver */
+static struct vis_identifier text_ident = { "SUNWdrm" };
+
+static ddi_device_acc_attr_t dev_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
};
-static struct modlinkage modlinkage = {
- MODREV_1, (void *)&modlmisc, NULL
+static ddi_device_acc_attr_t gem_dev_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_MERGING_OK_ACC,
+ DDI_FLAGERR_ACC
};
-static drm_inst_list_t *drm_inst_head;
-static kmutex_t drm_inst_list_lock;
+extern int __init drm_core_init(void);
+extern void __exit drm_core_exit(void);
+extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);
-static int drm_sun_open(dev_t *, int, int, cred_t *);
-static int drm_sun_close(dev_t, int, int, cred_t *);
-static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
-static int drm_sun_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
- size_t *, uint_t);
+struct find_gem_object {
+ offset_t offset;
+ struct drm_gem_object *obj;
+};
-/*
- * devmap callbacks for AGP and PCI GART
- */
-static int drm_devmap_map(devmap_cookie_t, dev_t,
- uint_t, offset_t, size_t, void **);
-static int drm_devmap_dup(devmap_cookie_t, void *,
- devmap_cookie_t, void **);
-static void drm_devmap_unmap(devmap_cookie_t, void *,
- offset_t, size_t, devmap_cookie_t, void **, devmap_cookie_t, void **);
+static int
+drm_devmap_map(devmap_cookie_t dhc, dev_t dev_id, uint_t flags,
+ offset_t offset, size_t len, void **new_priv)
+{
+ _NOTE(ARGUNUSED(offset, len))
-static drm_inst_list_t *drm_supp_alloc_drv_entry(dev_info_t *);
-static drm_inst_state_t *drm_sup_devt_to_state(dev_t);
-static void drm_supp_free_drv_entry(dev_info_t *);
+ devmap_handle_t *dhp;
+ struct ddi_umem_cookie *cp;
+ struct drm_minor *minor;
+ struct drm_device *dev;
+ int minor_id;
-static struct devmap_callback_ctl drm_devmap_callbacks = {
- DEVMAP_OPS_REV, /* devmap_rev */
- drm_devmap_map, /* devmap_map */
- NULL, /* devmap_access */
- drm_devmap_dup, /* devmap_dup */
- drm_devmap_unmap /* devmap_unmap */
-};
+ minor_id = DRM_DEV2MINOR(dev_id);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ dev = minor->dev;
-/*
- * Common device operations structure for all DRM drivers
- */
-struct cb_ops drm_cb_ops = {
- drm_sun_open, /* cb_open */
- drm_sun_close, /* cb_close */
- nodev, /* cb_strategy */
- nodev, /* cb_print */
- nodev, /* cb_dump */
- nodev, /* cb_read */
- nodev, /* cb_write */
- drm_sun_ioctl, /* cb_ioctl */
- drm_sun_devmap, /* cb_devmap */
- nodev, /* cb_mmap */
- NULL, /* cb_segmap */
- nochpoll, /* cb_chpoll */
- ddi_prop_op, /* cb_prop_op */
- 0, /* cb_stream */
- D_NEW | D_MTSAFE |D_DEVMAP /* cb_flag */
-};
+	/*
+	 * This driver supports only MAP_SHARED, not MAP_PRIVATE.
+	 */
+	if (flags & MAP_PRIVATE) {
+		DRM_ERROR("MAP_PRIVATE is not supported");
+ return (EINVAL);
+ }
-int
-_init(void)
+ mutex_enter(&dev->struct_mutex);
+ dhp = (devmap_handle_t *)dhc;
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+ cp->cook_refcnt = 1;
+ mutex_exit(&dev->struct_mutex);
+
+ *new_priv = dev;
+ return (0);
+}
+
+static int
+drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
+ void **new_pvtp)
{
- int error;
+ _NOTE(ARGUNUSED(new_dhc))
- if ((error = mod_install(&modlinkage)) != 0) {
- return (error);
- }
+ struct drm_device *dev = (struct drm_device *)pvtp;
+ devmap_handle_t *dhp;
+ struct ddi_umem_cookie *cp;
+
+ mutex_enter(&dev->struct_mutex);
+ dhp = (devmap_handle_t *)dhc;
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+ cp->cook_refcnt++;
+ mutex_exit(&dev->struct_mutex);
- /* initialize the instance list lock */
- mutex_init(&drm_inst_list_lock, NULL, MUTEX_DRIVER, NULL);
+ *new_pvtp = dev;
return (0);
}
-int
-_fini(void)
+static void
+drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
+ devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
+ void **new_pvtp2)
{
- int err;
+ _NOTE(ARGUNUSED(off, len))
- if ((err = mod_remove(&modlinkage)) != 0)
- return (err);
+ struct drm_device *dev;
+ devmap_handle_t *dhp;
+ devmap_handle_t *ndhp;
+ struct ddi_umem_cookie *cp;
+ struct ddi_umem_cookie *ncp;
- mutex_destroy(&drm_inst_list_lock);
- return (0);
+ dhp = (devmap_handle_t *)dhc;
+ dev = (struct drm_device *)pvtp;
+
+ mutex_enter(&dev->struct_mutex);
+
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+ if (new_dhp1 != NULL) {
+ ndhp = (devmap_handle_t *)new_dhp1;
+ ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
+ ncp->cook_refcnt++;
+ *new_pvtp1 = dev;
+ ASSERT(ncp == cp);
+ }
+
+ if (new_dhp2 != NULL) {
+ ndhp = (devmap_handle_t *)new_dhp2;
+ ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
+ ncp->cook_refcnt++;
+ *new_pvtp2 = dev;
+ ASSERT(ncp == cp);
+ }
+
+ /* FIXME: dh_cookie should not be released here. */
+#if 0
+ cp->cook_refcnt--;
+ if (cp->cook_refcnt == 0) {
+ gfxp_umem_cookie_destroy(dhp->dh_cookie);
+ dhp->dh_cookie = NULL;
+ }
+#endif
+
+ mutex_exit(&dev->struct_mutex);
}
-int
-_info(struct modinfo *modinfop)
+static struct devmap_callback_ctl drm_devmap_callbacks = {
+ DEVMAP_OPS_REV, /* devmap_rev */
+ drm_devmap_map, /* devmap_map */
+ NULL, /* devmap_access */
+ drm_devmap_dup, /* devmap_dup */
+ drm_devmap_unmap /* devmap_unmap */
+};
+
+static struct drm_local_map *
+__find_local_map(struct drm_device *dev, offset_t offset)
{
- return (mod_info(&modlinkage, modinfop));
+ struct drm_map_list *entry;
+
+ entry = idr_find(&dev->map_idr, offset >> PAGE_SHIFT);
+ if (entry)
+ return (entry->map);
+
+ return (NULL);
}
-void *
-drm_supp_register(dev_info_t *dip, drm_device_t *dp)
+static int
+drm_gem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags, offset_t off,
+ size_t len, void **pvtp)
{
- int error;
- char buf[80];
- int instance = ddi_get_instance(dip);
- ddi_acc_handle_t pci_cfg_handle;
- agp_master_softc_t *agpm;
- drm_inst_state_t *mstate;
- drm_inst_list_t *entry;
- gfxp_vgatext_softc_ptr_t gfxp;
- struct dev_ops *devop;
-
- ASSERT(dip != NULL);
-
- entry = drm_supp_alloc_drv_entry(dip);
- if (entry == NULL) {
- cmn_err(CE_WARN, "drm_supp_register: failed to get softstate");
- return (NULL);
+ _NOTE(ARGUNUSED(dhp, flags, len))
+
+ struct drm_device *drm_dev;
+ struct drm_minor *minor;
+ int minor_id = DRM_DEV2MINOR(dev);
+ drm_local_map_t *map = NULL;
+
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
+
+ drm_dev = minor->dev;
+
+ mutex_enter(&drm_dev->struct_mutex);
+ map = __find_local_map(drm_dev, off);
+ if (!map) {
+ mutex_exit(&drm_dev->struct_mutex);
+ *pvtp = NULL;
+ return (DDI_EINVAL);
}
- mstate = &entry->disl_state;
- /*
- * DRM drivers are required to use common cb_ops
- */
- devop = ddi_get_driver(dip);
- if (devop->devo_cb_ops != &drm_cb_ops) {
- devop->devo_cb_ops = &drm_cb_ops;
+ mutex_exit(&drm_dev->struct_mutex);
+
+ *pvtp = map->handle;
+
+ return (DDI_SUCCESS);
+}
+
+static int
+drm_gem_map_access(devmap_cookie_t dhp, void *pvt, offset_t offset, size_t len,
+ uint_t type, uint_t rw)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ struct gem_map_list *seg;
+
+ obj = (struct drm_gem_object *)pvt;
+ if (obj == NULL) {
+ goto next;
}
- /* Generic graphics initialization */
- gfxp = gfxp_vgatext_softc_alloc();
- error = gfxp_vgatext_attach(dip, DDI_ATTACH, gfxp);
- if (error != DDI_SUCCESS) {
- DRM_ERROR("drm_supp_regiter: failed to init gfx");
- goto exit1;
+ dev = obj->dev;
+ if (dev->driver->gem_fault != NULL)
+ dev->driver->gem_fault(obj);
+
+next:
+ if (devmap_load(dhp, offset, len, type, rw)) {
+ return (DDI_FAILURE);
+ }
+ if (obj != NULL) {
+ seg = drm_alloc(sizeof (struct gem_map_list), DRM_MEM_MAPS);
+ if (seg != NULL) {
+ mutex_lock(&dev->page_fault_lock);
+ seg->dhp = dhp;
+ seg->mapoffset = offset;
+ seg->maplen = len;
+ list_add_tail(&seg->head, &obj->seg_list, (caddr_t)seg);
+ mutex_unlock(&dev->page_fault_lock);
+ }
}
+ return (DDI_SUCCESS);
+}
+
+static void
+drm_gem_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
+ devmap_cookie_t new_dhp1, void **newpvtp1,
+ devmap_cookie_t new_dhp2, void **newpvtp2)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ struct gem_map_list *entry, *temp;
- /* create a minor node for common graphics ops */
- (void) sprintf(buf, "%s%d", GFX_NAME, instance);
- error = ddi_create_minor_node(dip, buf, S_IFCHR,
- INST2NODE0(instance), DDI_NT_DISPLAY, NULL);
- if (error != DDI_SUCCESS) {
- DRM_ERROR("drm_supp_regiter: "
- "failed to create minor node for gfx");
- goto exit2;
+ _NOTE(ARGUNUSED(dhp, pvtp, off, len, new_dhp1, newpvtp1))
+ _NOTE(ARGUNUSED(new_dhp2, newpvtp2))
+
+ obj = (struct drm_gem_object *)pvtp;
+ if (obj == NULL)
+ return;
+
+ dev = obj->dev;
+
+ mutex_lock(&dev->page_fault_lock);
+ if (list_empty(&obj->seg_list)) {
+ mutex_unlock(&dev->page_fault_lock);
+ return;
}
- /* setup mapping for later PCI config space access */
- error = pci_config_setup(dip, &pci_cfg_handle);
- if (error != DDI_SUCCESS) {
- DRM_ERROR("drm_supp_regiter: "
- "PCI configuration space setup failed");
- goto exit2;
+ list_for_each_entry_safe(entry, temp, struct gem_map_list,
+ &obj->seg_list, head) {
+ (void) devmap_unload(entry->dhp, entry->mapoffset,
+ entry->maplen);
+ list_del(&entry->head);
+ drm_free(entry, sizeof (struct gem_map_list), DRM_MEM_MAPS);
}
- /* AGP master attach */
- agpm = NULL;
- if (dp->driver->use_agp) {
- DRM_DEBUG("drm_supp_regiter: driver use AGP\n");
- error = agpmaster_attach(dip, &agpm,
- pci_cfg_handle, INST2NODE1(instance));
- if ((error != DDI_SUCCESS) && (dp->driver->require_agp)) {
- DRM_ERROR("drm_supp_regiter: "
- "AGP master support not available");
- goto exit3;
- }
+ mutex_unlock(&dev->page_fault_lock);
+}
+
+static struct devmap_callback_ctl drm_gem_map_ops = {
+ DEVMAP_OPS_REV, /* devmap_ops version number */
+ drm_gem_map, /* devmap_ops map routine */
+ drm_gem_map_access, /* devmap_ops access routine */
+ NULL, /* devmap_ops dup routine */
+ drm_gem_unmap, /* devmap_ops unmap routine */
+};
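+
+/*
+ * The GEM devmap callbacks above let the segment driver fault GEM
+ * backing pages on demand: drm_gem_map() resolves the minor and local
+ * map at mmap time, drm_gem_map_access() gives the driver a chance to
+ * populate the object (gem_fault) before devmap_load(), and
+ * drm_gem_unmap() unloads any translations recorded on the object's
+ * seg_list.
+ */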
+
+static int
+__devmap_general(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t len, size_t *maplen)
+{
+ off_t regoff;
+ int regno, ret;
+
+ regno = drm_get_pci_index_reg(dev->devinfo,
+ map->offset, (uint_t)len, &regoff);
+ if (regno < 0) {
+ DRM_ERROR("drm_get_pci_index_reg() failed");
+ return (-EINVAL);
}
- mutex_enter(&mstate->mis_lock);
- mstate->mis_major = ddi_driver_major(dip);
- mstate->mis_dip = dip;
- mstate->mis_gfxp = gfxp;
- mstate->mis_agpm = agpm;
- mstate->mis_cfg_hdl = pci_cfg_handle;
- mstate->mis_devp = dp;
- mutex_exit(&mstate->mis_lock);
-
- /* create minor node for DRM access */
- (void) sprintf(buf, "%s%d", DRM_DEVNODE, instance);
- if (ddi_create_minor_node(dip, buf, S_IFCHR,
- INST2NODE2(instance), DDI_NT_DISPLAY_DRM, 0)) {
- DRM_ERROR("supp_regiter: faled to create minor node for drm");
- goto exit4;
+ ret = devmap_devmem_setup(dhp, dev->devinfo, NULL,
+ regno, (offset_t)regoff, len, PROT_ALL,
+ 0, &dev_attr);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("devmap_devmem_setup failed, ret=%d", ret);
+ return (-EFAULT);
}
- return ((void *)mstate);
+ *maplen = len;
+ return (0);
+}
-exit4:
- if ((dp->driver->use_agp) && agpm)
- agpmaster_detach(&agpm);
-exit3:
- pci_config_teardown(&pci_cfg_handle);
-exit2:
- (void) gfxp_vgatext_detach(dip, DDI_DETACH, gfxp);
-exit1:
- gfxp_vgatext_softc_free(gfxp);
- drm_supp_free_drv_entry(dip);
- ddi_remove_minor_node(dip, NULL);
+static int
+__devmap_shm(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t len, size_t *maplen)
+{
+ int ret;
- return (NULL);
-}
+ if (!map->umem_cookie)
+ return (-EINVAL);
+ len = ptob(btopr(map->size));
-int
-drm_supp_unregister(void *handle)
+ ret = devmap_umem_setup(dhp, dev->devinfo,
+ NULL, map->umem_cookie, 0, len, PROT_ALL,
+ IOMEM_DATA_CACHED, NULL);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("devmap_umem_setup failed, ret=%d", ret);
+ return (-EFAULT);
+ }
+
+ *maplen = len;
+ return (0);
+}
+
+static int
+__devmap_agp(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t len, size_t *maplen)
{
- drm_inst_list_t *list;
- drm_inst_state_t *mstate;
-
- list = (drm_inst_list_t *)handle;
- mstate = &list->disl_state;
- mutex_enter(&mstate->mis_lock);
-
- /* AGP master detach */
- if (mstate->mis_agpm != NULL)
- agpmaster_detach(&mstate->mis_agpm);
-
- /* free PCI config access handle */
- if (mstate->mis_cfg_hdl)
- pci_config_teardown(&mstate->mis_cfg_hdl);
-
- /* graphics misc module detach */
- if (mstate->mis_gfxp) {
- (void) gfxp_vgatext_detach(mstate->mis_dip, DDI_DETACH,
- mstate->mis_gfxp);
- gfxp_vgatext_softc_free(mstate->mis_gfxp);
+ int ret;
+
+ if (dev->agp == NULL) {
+		DRM_ERROR("attempted to mmap AGP "
+		    "memory before AGP support is enabled");
+ return (-ENODEV);
}
- mstate->mis_devp = NULL;
+ len = ptob(btopr(len));
- /* remove all minor nodes */
- ddi_remove_minor_node(mstate->mis_dip, NULL);
- mutex_exit(&mstate->mis_lock);
- drm_supp_free_drv_entry(mstate->mis_dip);
+ ret = devmap_umem_setup(dhp, dev->devinfo,
+ &drm_devmap_callbacks, map->umem_cookie, 0, len, PROT_ALL,
+ IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("devmap_umem_setup() failed, ret=%d", ret);
+ return (-EFAULT);
+ }
- return (DDI_SUCCESS);
+ *maplen = len;
+ return (0);
}
+static int
+__devmap_sg(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t len, size_t *maplen)
+{
+ int ret;
+
+ len = ptob(btopr(len));
+ if (len > map->size) {
+ DRM_ERROR("offset=0x%lx, virtual=0x%p, "
+ "mapsize=0x%lx, len=0x%lx",
+ map->offset, dev->sg->virtual, map->size, len);
+ return (-EINVAL);
+ }
+
+ ret = devmap_umem_setup(dhp, dev->devinfo,
+ &drm_devmap_callbacks, map->umem_cookie, 0, len, PROT_ALL,
+ IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("devmap_umem_setup() fail");
+ return (-EFAULT);
+ }
+
+ *maplen = len;
+ return (0);
+}
-/*ARGSUSED*/
static int
-drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+__devmap_gem(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t *maplen)
{
- drm_inst_state_t *mstate;
- drm_cminor_t *mp, *newp;
- drm_device_t *dp;
- minor_t minor;
- int newminor;
- int instance;
- int err;
-
- mstate = drm_sup_devt_to_state(*devp);
- /*
- * return ENXIO for deferred attach so that system can
- * attach us again.
- */
- if (mstate == NULL)
- return (ENXIO);
+ struct devmap_callback_ctl *callbackops = NULL;
+ int ret;
- /*
- * The lest significant 15 bits are used for minor_number, and
- * the mid 3 bits are used for instance number. All minor numbers
- * are used as follows:
- * 0 -- gfx
- * 1 -- agpmaster
- * 2 -- drm
- * (3, MAX_CLONE_MINOR) -- drm minor node for clone open.
- */
- minor = DEV2MINOR(*devp);
- instance = DEV2INST(*devp);
- ASSERT(minor <= MAX_CLONE_MINOR);
+ if (map->callback == 1) {
+ callbackops = &drm_gem_map_ops;
+ }
+
+ if (!map->umem_cookie)
+ return (-EINVAL);
+
+ ret = gfxp_devmap_umem_setup(dhp, dev->devinfo, callbackops,
+ map->umem_cookie, 0, map->size, PROT_ALL,
+ IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP, &gem_dev_attr);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("gfxp_devmap_umem_setup failed, ret=%d", ret);
+ return (-EFAULT);
+ }
+
+ *maplen = map->size;
+ return (0);
+}
+
+static int
+drm_sun_open(dev_t *dev_id, int flag, int otyp, cred_t *credp)
+{
+ _NOTE(ARGUNUSED(otyp))
+
+ int minor_id = DRM_DEV2MINOR(*dev_id);
+ struct drm_minor *minor;
+ int clone_id;
+ int ret;
+
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
/*
	 * No operations for VGA & AGP master devices, always return OK.
*/
- if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
return (0);
- /*
- * From here, we start to process drm
- */
-
- dp = mstate->mis_devp;
- if (!dp)
- return (ENXIO);
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (0);
/*
* Drm driver implements a software lock to serialize access
@@ -352,86 +492,79 @@ drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
* during open()'ing, and find corresponding process struct
* via minor number when close() is called.
*/
- newp = kmem_zalloc(sizeof (drm_cminor_t), KM_SLEEP);
- mutex_enter(&dp->dev_lock);
- for (newminor = DRM_MIN_CLONEMINOR; newminor < MAX_CLONE_MINOR;
- newminor ++) {
- TAILQ_FOREACH(mp, &dp->minordevs, link) {
- if (mp->minor == newminor)
- break;
- }
- if (mp == NULL)
- goto gotminor;
+ ret = idr_get_new_above(&minor->clone_idr, NULL, 0, &clone_id);
+ if (ret)
+ return (EMFILE);
+
+ if (clone_id > DRM_CLONEID_MAX) {
+ (void) idr_remove(&minor->clone_idr, clone_id);
+ return (EMFILE);
}
- mutex_exit(&dp->dev_lock);
- (void) kmem_free(newp, sizeof (drm_cminor_t));
- return (EMFILE);
-
-gotminor:
- TAILQ_INSERT_TAIL(&dp->minordevs, newp, link);
- newp->minor = newminor;
- mutex_exit(&dp->dev_lock);
- err = drm_open(dp, newp, flag, otyp, credp);
- if (err) {
- mutex_enter(&dp->dev_lock);
- TAILQ_REMOVE(&dp->minordevs, newp, link);
- (void) kmem_free(newp, sizeof (drm_cminor_t));
- mutex_exit(&dp->dev_lock);
-
- return (err);
+ ret = drm_open(minor, clone_id, flag, credp);
+ if (ret) {
+ (void) idr_remove(&minor->clone_idr, clone_id);
+ return (-ret);
}
- /* return a clone minor */
- newminor = newminor | (instance << NBITSMNODE);
- *devp = makedevice(getmajor(*devp), newminor);
- return (err);
+ *dev_id = DRM_MAKEDEV(getmajor(*dev_id), minor_id, clone_id);
+
+ return (-ret);
}
-/*ARGSUSED*/
static int
-drm_sun_close(dev_t dev, int flag, int otyp, cred_t *credp)
+drm_sun_close(dev_t dev_id, int flag, int otyp, cred_t *credp)
{
- drm_inst_state_t *mstate;
- drm_device_t *dp;
- minor_t minor;
- int ret;
+ _NOTE(ARGUNUSED(flag, otyp, credp))
- mstate = drm_sup_devt_to_state(dev);
- if (mstate == NULL)
- return (EBADF);
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
+ int ret = 0;
+
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
- minor = DEV2MINOR(dev);
- ASSERT(minor <= MAX_CLONE_MINOR);
- if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
+ /*
+	 * No operations for VGA & AGP master devices, always return OK.
+ */
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
return (0);
- dp = mstate->mis_devp;
- if (dp == NULL) {
- DRM_ERROR("drm_sun_close: NULL soft state");
- return (ENXIO);
- }
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (0);
- ret = drm_close(dp, minor, flag, otyp, credp);
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
+
+ ret = drm_release(file_priv);
+ if (ret)
+ return (-ret);
+
+ (void) idr_remove(&minor->clone_idr, clone_id);
- return (ret);
+ return (0);
}
-/*ARGSUSED*/
static int
-drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
- cred_t *credp, int *rvalp)
+drm_sun_ioctl(dev_t dev_id, int cmd, intptr_t arg, int mode, cred_t *credp,
+ int *rvalp)
{
- extern drm_ioctl_desc_t drm_ioctls[];
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
- drm_inst_state_t *mstate;
- drm_device_t *dp;
- drm_ioctl_desc_t *ioctl;
- drm_ioctl_t *func;
- drm_file_t *fpriv;
- minor_t minor;
- int retval;
- int nr;
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
if (cmd == VIS_GETIDENTIFIER) {
if (ddi_copyout(&text_ident, (void *)arg,
@@ -439,572 +572,223 @@ drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
return (EFAULT);
}
- mstate = drm_sup_devt_to_state(dev);
- if (mstate == NULL) {
- return (EIO);
- }
-
- minor = DEV2MINOR(dev);
- ASSERT(minor <= MAX_CLONE_MINOR);
- switch (minor) {
- case GFX_MINOR:
- retval = gfxp_vgatext_ioctl(dev, cmd, arg,
- mode, credp, rvalp, mstate->mis_gfxp);
- return (retval);
-
- case AGPMASTER_MINOR:
- retval = agpmaster_ioctl(dev, cmd, arg, mode,
- credp, rvalp, mstate->mis_agpm);
- return (retval);
-
- case DRM_MINOR:
- default: /* DRM cloning minor nodes */
- break;
- }
-
- dp = mstate->mis_devp;
- ASSERT(dp != NULL);
-
- nr = DRM_IOCTL_NR(cmd);
- ioctl = &drm_ioctls[nr];
- atomic_inc_32(&dp->counts[_DRM_STAT_IOCTLS]);
-
- /* It's not a core DRM ioctl, try driver-specific. */
- if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
- /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
- nr -= DRM_COMMAND_BASE;
- if (nr > dp->driver->max_driver_ioctl) {
- DRM_ERROR("Bad driver ioctl number, 0x%x (of 0x%x)",
- nr, dp->driver->max_driver_ioctl);
- return (EINVAL);
- }
- ioctl = &dp->driver->driver_ioctls[nr];
- }
-
- func = ioctl->func;
- if (func == NULL) {
- return (ENOTSUP);
- }
-
- mutex_enter(&dp->dev_lock);
- fpriv = drm_find_file_by_proc(dp, credp);
- mutex_exit(&dp->dev_lock);
- if (fpriv == NULL) {
- DRM_ERROR("drm_sun_ioctl : can't find authenticator");
- return (EACCES);
- }
-
- if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
- ((ioctl->flags & DRM_AUTH) && !fpriv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !fpriv->master))
- return (EACCES);
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
+ return (gfxp_vgatext_ioctl(dev_id, cmd, arg, mode, credp,
+ rvalp, minor->private));
- fpriv->dev = dev;
- fpriv->credp = credp;
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (agpmaster_ioctl(dev_id, cmd, arg, mode, credp,
+ rvalp, minor->private));
- retval = func(dp, arg, fpriv, mode);
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
- return (retval);
+ return (-(drm_ioctl(dev_id, file_priv, cmd, arg, mode, credp)));
}
-/*ARGSUSED*/
static int
-drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
+drm_sun_devmap(dev_t dev_id, devmap_cookie_t dhp, offset_t offset,
size_t len, size_t *maplen, uint_t model)
{
- extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);
-
- drm_inst_state_t *mstate;
- drm_device_t *dp;
- ddi_umem_cookie_t cookie;
- drm_local_map_t *map = NULL;
- unsigned long aperbase;
- u_offset_t handle;
- offset_t koff;
- caddr_t kva;
- minor_t minor;
- size_t length;
- int ret;
-
- static ddi_device_acc_attr_t dev_attr = {
- DDI_DEVICE_ATTR_V0,
- DDI_NEVERSWAP_ACC,
- DDI_STRICTORDER_ACC,
- };
- static ddi_device_acc_attr_t gem_dev_attr = {
- DDI_DEVICE_ATTR_V0,
- DDI_NEVERSWAP_ACC,
- DDI_MERGING_OK_ACC
- };
-
- mstate = drm_sup_devt_to_state(dev);
- if (mstate == NULL)
- return (ENXIO);
-
- minor = DEV2MINOR(dev);
- switch (minor) {
- case GFX_MINOR:
- ret = gfxp_vgatext_devmap(dev, dhp, offset, len, maplen, model,
- mstate->mis_gfxp);
- return (ret);
-
- case AGPMASTER_MINOR:
+ struct drm_device *dev;
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
+ drm_local_map_t *map = NULL;
+
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
+
+ dev = minor->dev;
+
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
+ return (gfxp_vgatext_devmap(dev_id, dhp, offset, len,
+ maplen, model, minor->private));
+
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
return (ENOTSUP);
- case DRM_MINOR:
- break;
-
- default:
- /* DRM cloning nodes */
- if (minor > MAX_CLONE_MINOR)
- return (EBADF);
- break;
- }
-
-
- dp = mstate->mis_devp;
- if (dp == NULL) {
- DRM_ERROR("drm_sun_devmap: NULL soft state");
- return (EINVAL);
- }
-
- mutex_enter(&dp->dev_lock);
-
- if (dp->driver->use_gem == 1) {
- struct idr_list *entry;
- drm_cminor_t *mp;
-
- mp = drm_find_file_by_minor(dp, minor);
- if (!mp) {
- mutex_exit(&dp->dev_lock);
- DRM_ERROR("drm_sun_devmap: can't find authenticator");
- return (EACCES);
- }
-
- spin_lock(&dp->struct_mutex);
- idr_list_for_each(entry, &(mp->fpriv->object_idr)) {
- if ((uintptr_t)entry->obj == (u_offset_t)offset) {
- map = entry->obj->map;
- goto goon;
- }
- }
-goon:
- spin_unlock(&dp->struct_mutex);
- }
-
- if (map == NULL) {
- /*
- * We will solve 32-bit application on 64-bit kernel
- * issue later, now, we just use low 32-bit
- */
- handle = (u_offset_t)offset;
- handle &= 0xffffffff;
-
- TAILQ_FOREACH(map, &dp->maplist, link) {
- if (handle ==
- ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
- break;
- }
-
- /*
- * Temporarily, because offset is phys_addr for register
- * and framebuffer, is kernel virtual_addr for others
- * Maybe we will use hash table to solve this issue later.
- */
- if (map == NULL) {
- TAILQ_FOREACH(map, &dp->maplist, link) {
- if (handle == (map->offset & 0xffffffff))
- break;
- }
- }
- }
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
- if (map == NULL) {
- u_offset_t tmp;
-
- mutex_exit(&dp->dev_lock);
- cmn_err(CE_WARN, "Can't find map, offset=0x%llx, len=%x\n",
- offset, (int)len);
- cmn_err(CE_WARN, "Current mapping:\n");
- TAILQ_FOREACH(map, &dp->maplist, link) {
- tmp = (u_offset_t)((uintptr_t)map->handle) & 0xffffffff;
- cmn_err(CE_WARN, "map(handle=0x%p, size=0x%lx,type=%d,"
- "offset=0x%lx), handle=%llx, tmp=%lld", map->handle,
- map->size, map->type, map->offset, handle, tmp);
- }
- return (-1);
+ mutex_enter(&dev->struct_mutex);
+ map = __find_local_map(dev, offset);
+ if (!map) {
+ mutex_exit(&dev->struct_mutex);
+ return (EFAULT);
}
if (map->flags & _DRM_RESTRICTED) {
- mutex_exit(&dp->dev_lock);
- cmn_err(CE_WARN, "restricted map\n");
- return (-1);
+ mutex_exit(&dev->struct_mutex);
+ return (ENOTSUP);
}
+ mutex_exit(&dev->struct_mutex);
- mutex_exit(&dp->dev_lock);
switch (map->type) {
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
- {
- int regno;
- off_t regoff;
-
- regno = drm_get_pci_index_reg(dp->dip,
- map->offset, (uint_t)len, &regoff);
- if (regno < 0) {
- DRM_ERROR("devmap: failed to get register"
- " offset=0x%llx, len=0x%x", handle, len);
- return (EINVAL);
- }
-
- ret = devmap_devmem_setup(dhp, dp->dip, NULL,
- regno, (offset_t)regoff, len, PROT_ALL,
- 0, &dev_attr);
- if (ret != 0) {
- *maplen = 0;
- DRM_ERROR("devmap: failed, regno=%d,type=%d,"
- " handle=0x%x, offset=0x%llx, len=0x%x",
- regno, map->type, handle, offset, len);
- return (ret);
- }
- *maplen = len;
- return (ret);
- }
+ return (__devmap_general(dev, dhp, map, len, maplen));
case _DRM_SHM:
- if (map->drm_umem_cookie == NULL)
- return (EINVAL);
- length = ptob(btopr(map->size));
- ret = devmap_umem_setup(dhp, dp->dip, NULL,
- map->drm_umem_cookie, 0, length,
- PROT_ALL, IOMEM_DATA_CACHED, NULL);
- if (ret != 0) {
- *maplen = 0;
- return (ret);
- }
- *maplen = length;
-
- return (DDI_SUCCESS);
+ return (__devmap_shm(dev, dhp, map, len, maplen));
case _DRM_AGP:
- if (dp->agp == NULL) {
- cmn_err(CE_WARN, "drm_sun_devmap: attempted to mmap AGP"
- "memory before AGP support is enabled");
- return (DDI_FAILURE);
- }
-
- aperbase = dp->agp->base;
- koff = map->offset - aperbase;
- length = ptob(btopr(len));
- kva = map->dev_addr;
- cookie = gfxp_umem_cookie_init(kva, length);
- if (cookie == NULL) {
- cmn_err(CE_WARN, "devmap:failed to get umem_cookie");
- return (DDI_FAILURE);
- }
-
- if ((ret = devmap_umem_setup(dhp, dp->dip,
- &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
- IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr)) < 0) {
- gfxp_umem_cookie_destroy(cookie);
- cmn_err(CE_WARN, "devmap:failed, retval=%d", ret);
- return (DDI_FAILURE);
- }
- *maplen = length;
- break;
+ return (__devmap_agp(dev, dhp, map, len, maplen));
case _DRM_SCATTER_GATHER:
- koff = map->offset - (unsigned long)(caddr_t)dp->sg->virtual;
- kva = map->dev_addr + koff;
- length = ptob(btopr(len));
- if (length > map->size) {
- cmn_err(CE_WARN, "offset=0x%lx, virtual=0x%p,"
- "mapsize=0x%lx,len=0x%lx", map->offset,
- dp->sg->virtual, map->size, len);
- return (DDI_FAILURE);
- }
- cookie = gfxp_umem_cookie_init(kva, length);
- if (cookie == NULL) {
- cmn_err(CE_WARN, "devmap:failed to get umem_cookie");
- return (DDI_FAILURE);
- }
- ret = devmap_umem_setup(dhp, dp->dip,
- &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
- IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
- if (ret != 0) {
- cmn_err(CE_WARN, "sun_devmap: umem_setup fail");
- gfxp_umem_cookie_destroy(cookie);
- return (DDI_FAILURE);
- }
- *maplen = length;
- break;
-
- case _DRM_TTM:
- if (map->drm_umem_cookie == NULL)
- return (EINVAL);
-
- if (gfxp_devmap_umem_setup(dhp, dp->dip,
- NULL, map->drm_umem_cookie, 0, map->size, PROT_ALL,
- IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP,
- &gem_dev_attr)) {
- cmn_err(CE_WARN, "devmap:failed, retval=%d", ret);
- return (DDI_FAILURE);
- }
- *maplen = map->size;
- return (DDI_SUCCESS);
+ return (__devmap_sg(dev, dhp, map, len, maplen));
- default:
- return (DDI_FAILURE);
+ case _DRM_GEM:
+ return (__devmap_gem(dev, dhp, map, maplen));
}
- return (DDI_SUCCESS);
+ return (ENOTSUP);
}
-/*ARGSUSED*/
static int
-drm_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags,
- offset_t offset, size_t len, void **new_priv)
+drm_sun_read(dev_t dev_id, struct uio *uiop, cred_t *credp)
{
- devmap_handle_t *dhp;
- drm_inst_state_t *statep;
- struct ddi_umem_cookie *cp;
+ _NOTE(ARGUNUSED(credp))
+
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
- statep = drm_sup_devt_to_state(dev);
- ASSERT(statep != NULL);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
/*
- * This driver only supports MAP_SHARED,
- * and doesn't support MAP_PRIVATE
+ * No operations for VGA & AGP master devices, always return OK.
*/
- if (flags & MAP_PRIVATE) {
- cmn_err(CE_WARN, "!DRM driver doesn't support MAP_PRIVATE");
- return (EINVAL);
- }
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
+ return (0);
- mutex_enter(&statep->dis_ctxlock);
- dhp = (devmap_handle_t *)dhc;
- cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
- cp->cook_refcnt = 1;
- mutex_exit(&statep->dis_ctxlock);
- *new_priv = statep;
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (0);
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
+
+ (void) drm_read(file_priv, uiop);
return (0);
}
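
The dev_t decoding that opens drm_sun_read() -- minor id, then clone id,
then two idr lookups -- reappears verbatim in drm_sun_chpoll() below and
presumably in the other cb_ops entry points. A minimal sketch of the
shared helper this invites, using only the idr fields already referenced
above (the helper itself is hypothetical, not part of this import):

	/* Hypothetical: resolve a dev_t to its minor and per-open drm_file. */
	static int
	drm_sun_lookup(dev_t dev_id, struct drm_minor **minorp,
	    struct drm_file **fpp)
	{
		struct drm_minor *minor;

		minor = idr_find(&drm_minors_idr, DRM_DEV2MINOR(dev_id));
		if (minor == NULL || minor->dev == NULL)
			return (ENODEV);

		*fpp = idr_find(&minor->clone_idr, DRM_DEV2CLONEID(dev_id));
		if (*fpp == NULL)
			return (EBADF);

		*minorp = minor;
		return (0);
	}
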
-/*ARGSUSED*/
-static void
-drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
- devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
- void **new_pvtp2)
+static int
+drm_sun_chpoll(dev_t dev_id, short events, int anyyet, short *reventsp,
+ struct pollhead **phpp)
{
- devmap_handle_t *dhp;
- devmap_handle_t *ndhp;
- drm_inst_state_t *statep;
- struct ddi_umem_cookie *cp;
- struct ddi_umem_cookie *ncp;
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
- dhp = (devmap_handle_t *)dhc;
- statep = (drm_inst_state_t *)pvtp;
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
- mutex_enter(&statep->dis_ctxlock);
- cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
- if (new_dhp1 != NULL) {
- ndhp = (devmap_handle_t *)new_dhp1;
- ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
- ncp->cook_refcnt ++;
- *new_pvtp1 = statep;
- ASSERT(ncp == cp);
- }
-
- if (new_dhp2 != NULL) {
- ndhp = (devmap_handle_t *)new_dhp2;
- ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
- ncp->cook_refcnt ++;
- *new_pvtp2 = statep;
- ASSERT(ncp == cp);
- }
-
- cp->cook_refcnt --;
- if (cp->cook_refcnt == 0) {
- gfxp_umem_cookie_destroy(dhp->dh_cookie);
- dhp->dh_cookie = NULL;
- }
- mutex_exit(&statep->dis_ctxlock);
-}
+ /*
+	 * Likewise, the VGA text and AGP master device nodes have no
+	 * poll(2) semantics; report success with no events.
+ */
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
+ return (0);
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (0);
-/*ARGSUSED*/
-static int
-drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
- void **new_pvtp)
-{
- devmap_handle_t *dhp;
- drm_inst_state_t *statep;
- struct ddi_umem_cookie *cp;
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
- statep = (drm_inst_state_t *)pvtp;
- mutex_enter(&statep->dis_ctxlock);
- dhp = (devmap_handle_t *)dhc;
- cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
- cp->cook_refcnt ++;
- mutex_exit(&statep->dis_ctxlock);
- *new_pvtp = statep;
+ if (!anyyet) {
+ *phpp = &file_priv->drm_pollhead;
+ }
+ *reventsp = drm_poll(file_priv, events);
return (0);
}
-int
-drm_dev_to_instance(dev_t dev)
-{
- return (DEV2INST(dev));
-}
-
/*
- * drm_supp_alloc_drv_entry()
- *
- * Description:
- * Create a DRM entry and add it into the instance list (drm_inst_head).
- * Note that we don't allow a duplicated entry
+ * Common device operations structure for all DRM drivers
*/
-static drm_inst_list_t *
-drm_supp_alloc_drv_entry(dev_info_t *dip)
-{
- drm_inst_list_t **plist;
- drm_inst_list_t *list;
- drm_inst_list_t *entry;
-
- /* protect the driver list */
- mutex_enter(&drm_inst_list_lock);
- plist = &drm_inst_head;
- list = *plist;
- while (list) {
- if (list->disl_state.mis_dip == dip) {
- mutex_exit(&drm_inst_list_lock);
- cmn_err(CE_WARN, "%s%d already registered",
- ddi_driver_name(dip), ddi_get_instance(dip));
- return (NULL);
- }
- plist = &list->disl_next;
- list = list->disl_next;
- }
-
- /* "dip" is not registered, create new one and add to list */
- entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
- *plist = entry;
- entry->disl_state.mis_dip = dip;
- mutex_init(&entry->disl_state.mis_lock, NULL, MUTEX_DRIVER, NULL);
- mutex_init(&entry->disl_state.dis_ctxlock, NULL, MUTEX_DRIVER, NULL);
- mutex_exit(&drm_inst_list_lock);
+struct cb_ops drm_cb_ops = {
+ drm_sun_open, /* cb_open */
+ drm_sun_close, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ drm_sun_read, /* cb_read */
+ nodev, /* cb_write */
+ drm_sun_ioctl, /* cb_ioctl */
+ drm_sun_devmap, /* cb_devmap */
+ nodev, /* cb_mmap */
+ NULL, /* cb_segmap */
+ drm_sun_chpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ 0, /* cb_stream */
+ D_NEW | D_MTSAFE | D_DEVMAP /* cb_flag */
+};
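
drm_cb_ops is deliberately non-static: it is the table a leaf driver
plugs into its own dev_ops so that every open/ioctl/devmap/poll on its
nodes funnels through the common entry points above. Illustratively (the
i915_* names here are hypothetical, not part of this import):

	static struct dev_ops i915_dev_ops = {
		DEVO_REV,		/* devo_rev */
		0,			/* devo_refcnt */
		i915_info,		/* devo_getinfo */
		nulldev,		/* devo_identify */
		nulldev,		/* devo_probe */
		i915_attach,		/* devo_attach */
		i915_detach,		/* devo_detach */
		nodev,			/* devo_reset */
		&drm_cb_ops,		/* devo_cb_ops */
		NULL,			/* devo_bus_ops */
		NULL,			/* devo_power */
		ddi_quiesce_not_supported	/* devo_quiesce */
	};
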
- return (entry);
+static struct modlmisc modlmisc = {
+ &mod_miscops, "DRM common interfaces"
+};
-} /* drm_supp_alloc_drv_entry */
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modlmisc, NULL
+};
-/*
- * drm_supp_free_drv_entry()
- */
-static void
-drm_supp_free_drv_entry(dev_info_t *dip)
+int
+_init(void)
{
- drm_inst_list_t *list;
- drm_inst_list_t **plist;
- drm_inst_state_t *mstate;
-
- /* protect the driver list */
- mutex_enter(&drm_inst_list_lock);
- plist = &drm_inst_head;
- list = *plist;
- while (list) {
- if (list->disl_state.mis_dip == dip) {
- *plist = list->disl_next;
- mstate = &list->disl_state;
- mutex_destroy(&mstate->mis_lock);
- mutex_destroy(&mstate->dis_ctxlock);
- kmem_free(list, sizeof (*list));
- mutex_exit(&drm_inst_list_lock);
- return;
- }
- plist = &list->disl_next;
- list = list->disl_next;
- }
- mutex_exit(&drm_inst_list_lock);
+ int ret;
-} /* drm_supp_free_drv_entry() */
+ ret = mod_install(&modlinkage);
+ if (ret)
+ return (ret);
-/*
- * drm_sup_devt_to_state()
- *
- * description:
- * Get the soft state of DRM instance by device number
- */
-static drm_inst_state_t *
-drm_sup_devt_to_state(dev_t dev)
+ return (drm_core_init());
+}
+
+int
+_fini(void)
{
- drm_inst_list_t *list;
- drm_inst_state_t *mstate;
- major_t major = getmajor(dev);
- int instance = DEV2INST(dev);
-
- mutex_enter(&drm_inst_list_lock);
- list = drm_inst_head;
- while (list) {
- mstate = &list->disl_state;
- mutex_enter(&mstate->mis_lock);
-
- if ((mstate->mis_major == major) &&
- (ddi_get_instance(mstate->mis_dip) == instance)) {
- mutex_exit(&mstate->mis_lock);
- mutex_exit(&drm_inst_list_lock);
- return (mstate);
- }
+ int ret;
- list = list->disl_next;
- mutex_exit(&mstate->mis_lock);
- }
+ ret = mod_remove(&modlinkage);
+ if (ret)
+ return (ret);
- mutex_exit(&drm_inst_list_lock);
- return (NULL);
+ drm_core_exit();
-} /* drm_sup_devt_to_state() */
+ return (0);
+}
int
-drm_supp_get_irq(void *handle)
+_info(struct modinfo *modinfop)
{
- drm_inst_list_t *list;
- drm_inst_state_t *mstate;
- int irq;
-
- list = (drm_inst_list_t *)handle;
- mstate = &list->disl_state;
- ASSERT(mstate != NULL);
- irq = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_ILINE);
- return (irq);
+ return (mod_info(&modlinkage, modinfop));
}
-int
-drm_supp_device_capability(void *handle, int capid)
+struct drm_local_map *
+drm_core_findmap(struct drm_device *dev, unsigned int token)
{
- drm_inst_list_t *list;
- drm_inst_state_t *mstate;
- uint8_t cap = 0;
- uint16_t caps_ptr;
-
- list = (drm_inst_list_t *)handle;
- mstate = &list->disl_state;
- ASSERT(mstate != NULL);
-
- /* has capabilities list ? */
- if ((pci_config_get16(mstate->mis_cfg_hdl, PCI_CONF_STAT) &
- PCI_CONF_CAP_MASK) == 0)
- return (NULL);
-
- caps_ptr = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_CAP_PTR);
- while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
- cap = pci_config_get32(mstate->mis_cfg_hdl, caps_ptr);
- if ((cap & PCI_CONF_CAPID_MASK) == capid)
- return (cap);
- caps_ptr = pci_config_get8(mstate->mis_cfg_hdl,
- caps_ptr + PCI_CAP_NEXT_PTR);
+ struct drm_map_list *_entry;
+
+ list_for_each_entry(_entry, struct drm_map_list, &dev->maplist, head) {
+ if (_entry->user_token == token)
+ return (_entry->map);
}
- return (0);
+ return (NULL);
}
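
drm_core_findmap() is the token-to-map lookup used when an offset handed
out at map-creation time comes back in from userland. Typical use, as a
short sketch (the surrounding devmap context is assumed):

	struct drm_local_map *map;

	/* 'offset' arrived from userland via mmap(2)/devmap(9E) */
	map = drm_core_findmap(dev, (unsigned int)offset);
	if (map == NULL)
		return (EINVAL);	/* no map registered for this token */
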
diff --git a/usr/src/uts/common/io/drm/drm_sunmod.h b/usr/src/uts/common/io/drm/drm_sunmod.h
deleted file mode 100644
index 32cd5c0..0000000
--- a/usr/src/uts/common/io/drm/drm_sunmod.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-/*
- * Common misc module interfaces of DRM under Solaris
- */
-
-/*
- * I915 DRM Driver for Solaris
- *
- * This driver provides the hardware 3D acceleration support for Intel
- * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
- * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
- * means the kernel device driver in DRI.
- *
- * I915 driver is a device dependent driver only, it depends on a misc module
- * named drm for generic DRM operations.
- *
- * This driver also calls into gfx and agpmaster misc modules respectively for
- * generic graphics operations and AGP master device support.
- */
-
-#ifndef _SYS_DRM_SUNMOD_H_
-#define _SYS_DRM_SUNMOD_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/types.h>
-#include <sys/errno.h>
-#include <sys/conf.h>
-#include <sys/kmem.h>
-#include <sys/visual_io.h>
-#include <sys/font.h>
-#include <sys/fbio.h>
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <sys/stat.h>
-#include <sys/file.h>
-#include <sys/open.h>
-#include <sys/modctl.h>
-#include <sys/vgareg.h>
-#include <sys/vgasubr.h>
-#include <sys/pci.h>
-#include <sys/kd.h>
-#include <sys/ddi_impldefs.h>
-#include <sys/sunldi.h>
-#include <sys/mkdev.h>
-#include <gfx_private.h>
-#include <sys/agpgart.h>
-#include <sys/agp/agpdefs.h>
-#include <sys/agp/agpmaster_io.h>
-#include "drmP.h"
-#include <sys/modctl.h>
-
-/*
- * dev_t of this driver looks consists of:
- *
- * major number with NBITSMAJOR bits
- * instance node number with NBITSINST bits
- * minor node number with NBITSMINOR - NBITSINST bits
- *
- * Each instance has at most 2^(NBITSMINOR - NBITSINST) minor nodes, the first
- * three are:
- * 0: gfx<instance number>, graphics common node
- * 1: agpmaster<instance number>, agpmaster node
- * 2: drm<instance number>, drm node
- */
-#define GFX_MINOR 0
-#define AGPMASTER_MINOR 1
-#define DRM_MINOR 2
-#define DRM_MIN_CLONEMINOR 3
-
-/*
- * Number of bits occupied by instance number in dev_t, currently maximum 8
- * instances are supported.
- */
-#define NBITSINST 3
-
-/* Number of bits occupied in dev_t by minor node */
-#define NBITSMNODE (18 - NBITSINST)
-
-/*
- * DRM use a "cloning" minor node mechanism to release lock on every close(2),
- * thus there will be a minor node for every open(2) operation. Here we give
- * the maximum DRM cloning minor node number.
- */
-#define MAX_CLONE_MINOR (1 << (NBITSMNODE) - 1)
-#define DEV2MINOR(dev) (getminor(dev) & ((1 << (NBITSMNODE)) - 1))
-#define DEV2INST(dev) (getminor(dev) >> NBITSMNODE)
-#define INST2NODE0(inst) ((inst) << NBITSMNODE)
-#define INST2NODE1(inst) (((inst) << NBITSMNODE) + AGPMASTER_MINOR)
-#define INST2NODE2(inst) (((inst) << NBITSMNODE) + DRM_MINOR)
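
(For reference, with NBITSINST = 3 these removed macros gave NBITSMNODE =
18 - 3 = 15, so the drm node of instance 2 had minor number INST2NODE2(2)
= (2 << 15) + DRM_MINOR = 65538; DEV2INST(65538) = 65538 >> 15 = 2 and
DEV2MINOR(65538) = 65538 & 0x7fff = 2 recover the pieces. The imported
code replaces this scheme with the DRM_DEV2MINOR and DRM_DEV2CLONEID
macros seen earlier in this patch.)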
-
-/* graphics name for the common graphics minor node */
-#define GFX_NAME "gfx"
-
-
-/*
- * softstate for DRM module
- */
-typedef struct drm_instance_state {
- kmutex_t mis_lock;
- kmutex_t dis_ctxlock;
- major_t mis_major;
- dev_info_t *mis_dip;
- drm_device_t *mis_devp;
- ddi_acc_handle_t mis_cfg_hdl;
- agp_master_softc_t *mis_agpm; /* agpmaster softstate ptr */
- gfxp_vgatext_softc_ptr_t mis_gfxp; /* gfx softstate */
-} drm_inst_state_t;
-
-
-struct drm_inst_state_list {
- drm_inst_state_t disl_state;
- struct drm_inst_state_list *disl_next;
-
-};
-typedef struct drm_inst_state_list drm_inst_list_t;
-
-
-/* Identifier of this driver */
-static struct vis_identifier text_ident = { "SUNWdrm" };
-static int drm_sun_open(dev_t *, int, int, cred_t *);
-static int drm_sun_close(dev_t, int, int, cred_t *);
-static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
-static int drm_sun_devmap(dev_t, devmap_cookie_t,
- offset_t, size_t, size_t *, uint_t);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_DRM_SUNMOD_H_ */
diff --git a/usr/src/uts/common/io/drm/drm_sysfs.c b/usr/src/uts/common/io/drm/drm_sysfs.c
new file mode 100644
index 0000000..ac59805
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sysfs.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/visual_io.h>
+#include <sys/fbio.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/open.h>
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/kd.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/sunldi.h>
+#include <sys/mkdev.h>
+#include "drmP.h"
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpmaster_io.h>
+
+/**
+ * drm_sysfs_device_add - create the device nodes for a DRM minor
+ * @minor: DRM minor to be added
+ *
+ * Solaris stand-in for the Linux sysfs class-device registration:
+ * depending on the minor type, attach the agpmaster or VGA text softc
+ * and/or create the corresponding minor node under the device's dev_info.
+ */
+int drm_sysfs_device_add(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ gfxp_vgatext_softc_ptr_t gfxp;
+ int ret;
+
+ switch (minor->type) {
+ case DRM_MINOR_AGPMASTER:
+ ret = agpmaster_attach(dev->devinfo,
+ (agp_master_softc_t **)&minor->private,
+ dev->pdev->pci_cfg_acc_handle, minor->index);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("agpmaster_attach failed");
+ return (ret);
+ }
+ return (0);
+
+ case DRM_MINOR_VGATEXT:
+ /* Generic graphics initialization */
+ gfxp = gfxp_vgatext_softc_alloc();
+ ret = gfxp_vgatext_attach(dev->devinfo, DDI_ATTACH, gfxp);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("gfxp_vgatext_attach failed");
+ return (EFAULT);
+ }
+ minor->private = gfxp;
+
+ ret = ddi_create_minor_node(dev->devinfo,
+ minor->name, S_IFCHR, minor->index, DDI_NT_DISPLAY, NULL);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("ddi_create_minor_node failed");
+ return (EFAULT);
+ }
+ return (0);
+
+ case DRM_MINOR_LEGACY:
+ case DRM_MINOR_CONTROL:
+ case DRM_MINOR_RENDER:
+ ret = ddi_create_minor_node(dev->devinfo,
+ minor->name, S_IFCHR, minor->index, DDI_NT_DISPLAY_DRM, NULL);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("ddi_create_minor_node failed");
+ return (EFAULT);
+ }
+ return (0);
+ }
+
+ return (ENOTSUP);
+}
+
+/**
+ * drm_sysfs_device_remove - remove the device nodes for a DRM minor
+ * @minor: DRM minor to remove
+ *
+ * Undoes drm_sysfs_device_add(): detaches the agpmaster or VGA text
+ * softc (if attached) and removes the minor node.
+ */
+void drm_sysfs_device_remove(struct drm_minor *minor)
+{
+ switch (minor->type) {
+ case DRM_MINOR_AGPMASTER:
+ if (minor->private) {
+ agpmaster_detach(
+ (agp_master_softc_t **)&minor->private);
+ minor->private = NULL;
+ }
+ break;
+
+ case DRM_MINOR_VGATEXT:
+ if (minor->private) {
+ (void) gfxp_vgatext_detach(minor->dev->devinfo,
+ DDI_DETACH, minor->private);
+ gfxp_vgatext_softc_free(minor->private);
+ minor->private = NULL;
+ }
+
+	/* LINTED: deliberate fall-through; VGATEXT also owns a minor node */
+ case DRM_MINOR_LEGACY:
+ case DRM_MINOR_CONTROL:
+ case DRM_MINOR_RENDER:
+ ddi_remove_minor_node(minor->dev->devinfo, minor->name);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/queue.h b/usr/src/uts/common/io/drm/queue.h
deleted file mode 100644
index 4994209..0000000
--- a/usr/src/uts/common/io/drm/queue.h
+++ /dev/null
@@ -1,585 +0,0 @@
-/* BEGIN CSTYLED */
-/*-
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)queue.h 8.5 (Berkeley) 8/20/94
- * $FreeBSD: /repoman/r/ncvs/src/sys/sys/queue.h,v 1.66 2006/05/26 18:17:53 emaste Exp $
- */
-
-#ifndef _SYS_QUEUE_H_
-#define _SYS_QUEUE_H_
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-/*
- * This file defines four types of data structures: singly-linked lists,
- * singly-linked tail queues, lists and tail queues.
- *
- * A singly-linked list is headed by a single forward pointer. The elements
- * are singly linked for minimum space and pointer manipulation overhead at
- * the expense of O(n) removal for arbitrary elements. New elements can be
- * added to the list after an existing element or at the head of the list.
- * Elements being removed from the head of the list should use the explicit
- * macro for this purpose for optimum efficiency. A singly-linked list may
- * only be traversed in the forward direction. Singly-linked lists are ideal
- * for applications with large datasets and few or no removals or for
- * implementing a LIFO queue.
- *
- * A singly-linked tail queue is headed by a pair of pointers, one to the
- * head of the list and the other to the tail of the list. The elements are
- * singly linked for minimum space and pointer manipulation overhead at the
- * expense of O(n) removal for arbitrary elements. New elements can be added
- * to the list after an existing element, at the head of the list, or at the
- * end of the list. Elements being removed from the head of the tail queue
- * should use the explicit macro for this purpose for optimum efficiency.
- * A singly-linked tail queue may only be traversed in the forward direction.
- * Singly-linked tail queues are ideal for applications with large datasets
- * and few or no removals or for implementing a FIFO queue.
- *
- * A list is headed by a single forward pointer (or an array of forward
- * pointers for a hash table header). The elements are doubly linked
- * so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before
- * or after an existing element or at the head of the list. A list
- * may only be traversed in the forward direction.
- *
- * A tail queue is headed by a pair of pointers, one to the head of the
- * list and the other to the tail of the list. The elements are doubly
- * linked so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before or
- * after an existing element, at the head of the list, or at the end of
- * the list. A tail queue may be traversed in either direction.
- *
- * For details on the use of these macros, see the queue(3) manual page.
- *
- *
- * SLIST LIST STAILQ TAILQ
- * _HEAD + + + +
- * _HEAD_INITIALIZER + + + +
- * _ENTRY + + + +
- * _INIT + + + +
- * _EMPTY + + + +
- * _FIRST + + + +
- * _NEXT + + + +
- * _PREV - - - +
- * _LAST - - + +
- * _FOREACH + + + +
- * _FOREACH_SAFE + + + +
- * _FOREACH_REVERSE - - - +
- * _FOREACH_REVERSE_SAFE - - - +
- * _INSERT_HEAD + + + +
- * _INSERT_BEFORE - + - +
- * _INSERT_AFTER + + + +
- * _INSERT_TAIL - - + +
- * _CONCAT - - + +
- * _REMOVE_HEAD + - + -
- * _REMOVE + + + +
- *
- */
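
Before the macro definitions, a minimal usage sketch of the tail-queue
flavor (the element type and names are hypothetical):

	struct foo {
		int val;
		TAILQ_ENTRY(foo) link;		/* embedded next/prev linkage */
	};

	TAILQ_HEAD(foo_head, foo) head = TAILQ_HEAD_INITIALIZER(head);

	void
	example(struct foo *f)
	{
		struct foo *it;

		TAILQ_INSERT_TAIL(&head, f, link);	/* O(1) append */
		TAILQ_FOREACH(it, &head, link)
			;				/* visit it->val */
		TAILQ_REMOVE(&head, f, link);		/* O(1) unlink */
	}
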
-#ifdef QUEUE_MACRO_DEBUG
-/* Store the last 2 places the queue element or head was altered */
-struct qm_trace {
- char * lastfile;
- int lastline;
- char * prevfile;
- int prevline;
-};
-
-#define TRACEBUF struct qm_trace trace;
-#define TRASHIT(x) do {(x) = (void *)-1;} while (*"\0")
-
-#define QMD_TRACE_HEAD(head) do { \
- (head)->trace.prevline = (head)->trace.lastline; \
- (head)->trace.prevfile = (head)->trace.lastfile; \
- (head)->trace.lastline = __LINE__; \
- (head)->trace.lastfile = __FILE__; \
-} while (*"\0")
-
-#define QMD_TRACE_ELEM(elem) do { \
- (elem)->trace.prevline = (elem)->trace.lastline; \
- (elem)->trace.prevfile = (elem)->trace.lastfile; \
- (elem)->trace.lastline = __LINE__; \
- (elem)->trace.lastfile = __FILE__; \
-} while (*"\0")
-
-#else
-#define QMD_TRACE_ELEM(elem)
-#define QMD_TRACE_HEAD(head)
-#define TRACEBUF
-#define TRASHIT(x)
-#endif /* QUEUE_MACRO_DEBUG */
-
-/*
- * Singly-linked List declarations.
- */
-#define SLIST_HEAD(name, type) \
-struct name { \
- struct type *slh_first; /* first element */ \
-}
-
-#define SLIST_HEAD_INITIALIZER(head) \
- { NULL }
-
-#define SLIST_ENTRY(type) \
-struct { \
- struct type *sle_next; /* next element */ \
-}
-
-/*
- * Singly-linked List functions.
- */
-#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
-
-#define SLIST_FIRST(head) ((head)->slh_first)
-
-#define SLIST_FOREACH(var, head, field) \
- for ((var) = SLIST_FIRST((head)); \
- (var); \
- (var) = SLIST_NEXT((var), field))
-
-#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = SLIST_FIRST((head)); \
- (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
- for ((varp) = &SLIST_FIRST((head)); \
- ((var) = *(varp)) != NULL; \
- (varp) = &SLIST_NEXT((var), field))
-
-#define SLIST_INIT(head) do { \
- SLIST_FIRST((head)) = NULL; \
-} while (*"\0")
-
-#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
- SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
- SLIST_NEXT((slistelm), field) = (elm); \
-} while (*"\0")
-
-#define SLIST_INSERT_HEAD(head, elm, field) do { \
- SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
- SLIST_FIRST((head)) = (elm); \
-} while (*"\0")
-
-#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
-
-#define SLIST_REMOVE(head, elm, type, field) do { \
- if (SLIST_FIRST((head)) == (elm)) { \
- SLIST_REMOVE_HEAD((head), field); \
- } \
- else { \
- struct type *curelm = SLIST_FIRST((head)); \
- while (SLIST_NEXT(curelm, field) != (elm)) \
- curelm = SLIST_NEXT(curelm, field); \
- SLIST_NEXT(curelm, field) = \
- SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
- } \
- TRASHIT((elm)->field.sle_next); \
-} while (*"\0")
-
-#define SLIST_REMOVE_HEAD(head, field) do { \
- SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
-} while (*"\0")
-
-/*
- * Singly-linked Tail queue declarations.
- */
-#define STAILQ_HEAD(name, type) \
-struct name { \
- struct type *stqh_first;/* first element */ \
- struct type **stqh_last;/* addr of last next element */ \
-}
-
-#define STAILQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).stqh_first }
-
-#define STAILQ_ENTRY(type) \
-struct { \
- struct type *stqe_next; /* next element */ \
-}
-
-/*
- * Singly-linked Tail queue functions.
- */
-#define STAILQ_CONCAT(head1, head2) do { \
- if (!STAILQ_EMPTY((head2))) { \
- *(head1)->stqh_last = (head2)->stqh_first; \
- (head1)->stqh_last = (head2)->stqh_last; \
- STAILQ_INIT((head2)); \
- } \
-} while (*"\0")
-
-#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
-
-#define STAILQ_FIRST(head) ((head)->stqh_first)
-
-#define STAILQ_FOREACH(var, head, field) \
- for((var) = STAILQ_FIRST((head)); \
- (var); \
- (var) = STAILQ_NEXT((var), field))
-
-
-#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = STAILQ_FIRST((head)); \
- (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define STAILQ_INIT(head) do { \
- STAILQ_FIRST((head)) = NULL; \
- (head)->stqh_last = &STAILQ_FIRST((head)); \
-} while (*"\0")
-
-#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
- if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
- (head)->stqh_last = &STAILQ_NEXT((elm), field); \
- STAILQ_NEXT((tqelm), field) = (elm); \
-} while (*"\0")
-
-#define STAILQ_INSERT_HEAD(head, elm, field) do { \
- if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
- (head)->stqh_last = &STAILQ_NEXT((elm), field); \
- STAILQ_FIRST((head)) = (elm); \
-} while (*"\0")
-
-#define STAILQ_INSERT_TAIL(head, elm, field) do { \
- STAILQ_NEXT((elm), field) = NULL; \
- *(head)->stqh_last = (elm); \
- (head)->stqh_last = &STAILQ_NEXT((elm), field); \
-} while (*"\0")
-
-#define STAILQ_LAST(head, type, field) \
- (STAILQ_EMPTY((head)) ? \
- NULL : \
- ((struct type *)(void *) \
- ((char *)((head)->stqh_last) - __offsetof(struct type, field))))
-
-#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
-
-#define STAILQ_REMOVE(head, elm, type, field) do { \
- if (STAILQ_FIRST((head)) == (elm)) { \
- STAILQ_REMOVE_HEAD((head), field); \
- } \
- else { \
- struct type *curelm = STAILQ_FIRST((head)); \
- while (STAILQ_NEXT(curelm, field) != (elm)) \
- curelm = STAILQ_NEXT(curelm, field); \
- if ((STAILQ_NEXT(curelm, field) = \
- STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
- (head)->stqh_last = &STAILQ_NEXT((curelm), field);\
- } \
- TRASHIT((elm)->field.stqe_next); \
-} while (*"\0")
-
-#define STAILQ_REMOVE_HEAD(head, field) do { \
- if ((STAILQ_FIRST((head)) = \
- STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
- (head)->stqh_last = &STAILQ_FIRST((head)); \
-} while (*"\0")
-
-#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
- if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
- (head)->stqh_last = &STAILQ_FIRST((head)); \
-} while (*"\0")
-
-/*
- * List declarations.
- */
-#define LIST_HEAD(name, type) \
-struct name { \
- struct type *lh_first; /* first element */ \
-}
-
-#define LIST_HEAD_INITIALIZER(head) \
- { NULL }
-
-#define LIST_ENTRY(type) \
-struct { \
- struct type *le_next; /* next element */ \
- struct type **le_prev; /* address of previous next element */ \
-}
-
-/*
- * List functions.
- */
-
-#if (defined(_KERNEL) && defined(INVARIANTS))
-#define QMD_LIST_CHECK_HEAD(head, field) do { \
- if (LIST_FIRST((head)) != NULL && \
- LIST_FIRST((head))->field.le_prev != \
- &LIST_FIRST((head))) \
- panic("Bad list head %p first->prev != head", (head)); \
-} while (*"\0")
-
-#define QMD_LIST_CHECK_NEXT(elm, field) do { \
- if (LIST_NEXT((elm), field) != NULL && \
- LIST_NEXT((elm), field)->field.le_prev != \
- &((elm)->field.le_next)) \
- panic("Bad link elm %p next->prev != elm", (elm)); \
-} while (*"\0")
-
-#define QMD_LIST_CHECK_PREV(elm, field) do { \
- if (*(elm)->field.le_prev != (elm)) \
- panic("Bad link elm %p prev->next != elm", (elm)); \
-} while (*"\0")
-#else
-#define QMD_LIST_CHECK_HEAD(head, field)
-#define QMD_LIST_CHECK_NEXT(elm, field)
-#define QMD_LIST_CHECK_PREV(elm, field)
-#endif /* (_KERNEL && INVARIANTS) */
-
-#define LIST_EMPTY(head) ((head)->lh_first == NULL)
-
-#define LIST_FIRST(head) ((head)->lh_first)
-
-#define LIST_FOREACH(var, head, field) \
- for ((var) = LIST_FIRST((head)); \
- (var); \
- (var) = LIST_NEXT((var), field))
-
-#define LIST_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = LIST_FIRST((head)); \
- (var) && ((tvar) = LIST_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define LIST_INIT(head) do { \
- LIST_FIRST((head)) = NULL; \
-} while (*"\0")
-
-#define LIST_INSERT_AFTER(listelm, elm, field) do { \
- QMD_LIST_CHECK_NEXT(listelm, field); \
- if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
- LIST_NEXT((listelm), field)->field.le_prev = \
- &LIST_NEXT((elm), field); \
- LIST_NEXT((listelm), field) = (elm); \
- (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
-} while (*"\0")
-
-#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
- QMD_LIST_CHECK_PREV(listelm, field); \
- (elm)->field.le_prev = (listelm)->field.le_prev; \
- LIST_NEXT((elm), field) = (listelm); \
- *(listelm)->field.le_prev = (elm); \
- (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
-} while (*"\0")
-
-#define LIST_INSERT_HEAD(head, elm, field) do { \
- QMD_LIST_CHECK_HEAD((head), field); \
- if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
- LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
- LIST_FIRST((head)) = (elm); \
- (elm)->field.le_prev = &LIST_FIRST((head)); \
-} while (*"\0")
-
-#define LIST_NEXT(elm, field) ((elm)->field.le_next)
-
-#define LIST_REMOVE(elm, field) do { \
- QMD_LIST_CHECK_NEXT(elm, field); \
- QMD_LIST_CHECK_PREV(elm, field); \
- if (LIST_NEXT((elm), field) != NULL) \
- LIST_NEXT((elm), field)->field.le_prev = \
- (elm)->field.le_prev; \
- *(elm)->field.le_prev = LIST_NEXT((elm), field); \
- TRASHIT((elm)->field.le_next); \
- TRASHIT((elm)->field.le_prev); \
-} while (*"\0")
-
-/*
- * Tail queue declarations.
- */
-#define TAILQ_HEAD(name, type) \
-struct name { \
- struct type *tqh_first; /* first element */ \
- struct type **tqh_last; /* addr of last next element */ \
- TRACEBUF \
-}
-
-#define TAILQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).tqh_first }
-
-#define TAILQ_ENTRY(type) \
-struct { \
- struct type *tqe_next; /* next element */ \
- struct type **tqe_prev; /* address of previous next element */ \
- TRACEBUF \
-}
-
-/*
- * Tail queue functions.
- */
-#if (defined(_KERNEL) && defined(INVARIANTS))
-#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
- if (!TAILQ_EMPTY(head) && \
- TAILQ_FIRST((head))->field.tqe_prev != \
- &TAILQ_FIRST((head))) \
- panic("Bad tailq head %p first->prev != head", (head)); \
-} while (*"\0")
-
-#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
- if (*(head)->tqh_last != NULL) \
- panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \
-} while (*"\0")
-
-#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
- if (TAILQ_NEXT((elm), field) != NULL && \
- TAILQ_NEXT((elm), field)->field.tqe_prev != \
- &((elm)->field.tqe_next)) \
- panic("Bad link elm %p next->prev != elm", (elm)); \
-} while (*"\0")
-
-#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
- if (*(elm)->field.tqe_prev != (elm)) \
- panic("Bad link elm %p prev->next != elm", (elm)); \
-} while (*"\0")
-#else
-#define QMD_TAILQ_CHECK_HEAD(head, field)
-#define QMD_TAILQ_CHECK_TAIL(head, headname)
-#define QMD_TAILQ_CHECK_NEXT(elm, field)
-#define QMD_TAILQ_CHECK_PREV(elm, field)
-#endif /* (_KERNEL && INVARIANTS) */
-
-#define TAILQ_CONCAT(head1, head2, field) do { \
- if (!TAILQ_EMPTY(head2)) { \
- *(head1)->tqh_last = (head2)->tqh_first; \
- (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
- (head1)->tqh_last = (head2)->tqh_last; \
- TAILQ_INIT((head2)); \
- QMD_TRACE_HEAD(head1); \
- QMD_TRACE_HEAD(head2); \
- } \
-} while (*"\0")
-
-#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
-
-#define TAILQ_FIRST(head) ((head)->tqh_first)
-
-#define TAILQ_FOREACH(var, head, field) \
- for ((var) = TAILQ_FIRST((head)); \
- (var); \
- (var) = TAILQ_NEXT((var), field))
-
-#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = TAILQ_FIRST((head)); \
- (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
- for ((var) = TAILQ_LAST((head), headname); \
- (var); \
- (var) = TAILQ_PREV((var), headname, field))
-
-#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
- for ((var) = TAILQ_LAST((head), headname); \
- (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
- (var) = (tvar))
-
-#define TAILQ_INIT(head) do { \
- TAILQ_FIRST((head)) = NULL; \
- (head)->tqh_last = &TAILQ_FIRST((head)); \
- QMD_TRACE_HEAD(head); \
-} while (*"\0")
-
-#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
- QMD_TAILQ_CHECK_NEXT(listelm, field); \
- if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
- TAILQ_NEXT((elm), field)->field.tqe_prev = \
- &TAILQ_NEXT((elm), field); \
- else { \
- (head)->tqh_last = &TAILQ_NEXT((elm), field); \
- QMD_TRACE_HEAD(head); \
- } \
- TAILQ_NEXT((listelm), field) = (elm); \
- (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
- QMD_TRACE_ELEM(&(elm)->field); \
- QMD_TRACE_ELEM(&listelm->field); \
-} while (*"\0")
-
-#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
- QMD_TAILQ_CHECK_PREV(listelm, field); \
- (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
- TAILQ_NEXT((elm), field) = (listelm); \
- *(listelm)->field.tqe_prev = (elm); \
- (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
- QMD_TRACE_ELEM(&(elm)->field); \
- QMD_TRACE_ELEM(&listelm->field); \
-} while (*"\0")
-
-#define TAILQ_INSERT_HEAD(head, elm, field) do { \
- QMD_TAILQ_CHECK_HEAD(head, field); \
- if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
- TAILQ_FIRST((head))->field.tqe_prev = \
- &TAILQ_NEXT((elm), field); \
- else \
- (head)->tqh_last = &TAILQ_NEXT((elm), field); \
- TAILQ_FIRST((head)) = (elm); \
- (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
- QMD_TRACE_HEAD(head); \
- QMD_TRACE_ELEM(&(elm)->field); \
-} while (*"\0")
-
-#define TAILQ_INSERT_TAIL(head, elm, field) do { \
- QMD_TAILQ_CHECK_TAIL(head, field); \
- TAILQ_NEXT((elm), field) = NULL; \
- (elm)->field.tqe_prev = (head)->tqh_last; \
- *(head)->tqh_last = (elm); \
- (head)->tqh_last = &TAILQ_NEXT((elm), field); \
- QMD_TRACE_HEAD(head); \
- QMD_TRACE_ELEM(&(elm)->field); \
-} while (*"\0")
-
-#define TAILQ_LAST(head, headname) \
- (*(((struct headname *)((head)->tqh_last))->tqh_last))
-
-#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
-
-#define TAILQ_PREV(elm, headname, field) \
- (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
-
-#define TAILQ_REMOVE(head, elm, field) do { \
- QMD_TAILQ_CHECK_NEXT(elm, field); \
- QMD_TAILQ_CHECK_PREV(elm, field); \
- if ((TAILQ_NEXT((elm), field)) != NULL) \
- TAILQ_NEXT((elm), field)->field.tqe_prev = \
- (elm)->field.tqe_prev; \
- else { \
- (head)->tqh_last = (elm)->field.tqe_prev; \
- QMD_TRACE_HEAD(head); \
- } \
- *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
- TRASHIT((elm)->field.tqe_next); \
- TRASHIT((elm)->field.tqe_prev); \
- QMD_TRACE_ELEM(&(elm)->field); \
-} while (*"\0")
-
-
-#ifdef _KERNEL
-
-#endif /* _KERNEL */
-
-#endif /* !_SYS_QUEUE_H_ */
-
-/* END CSTYLED */
diff --git a/usr/src/uts/intel/io/drm/i915_dma.c b/usr/src/uts/intel/io/drm/i915_dma.c
deleted file mode 100644
index 48ff692..0000000
--- a/usr/src/uts/intel/io/drm/i915_dma.c
+++ /dev/null
@@ -1,1146 +0,0 @@
-/* BEGIN CSTYLED */
-
-/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-
-
-/* Really want an OS-independent resettable timer. Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-/*ARGSUSED*/
-int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
- u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
- u32 last_acthd = I915_READ(acthd_reg);
- u32 acthd;
- int i;
-
- for (i = 0; i < 100000; i++) {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- acthd = I915_READ(acthd_reg);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
- if (ring->space >= n)
- return 0;
-
- if (ring->head != last_head)
- i = 0;
-
- if (acthd != last_acthd)
- i = 0;
-
- last_head = ring->head;
- last_acthd = acthd;
- DRM_UDELAY(10);
- }
-
- return (EBUSY);
-}
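
The free-space computation above treats the ring as a circular buffer
and keeps 8 bytes of slack behind the head. Worked through with
hypothetical register values:

	/* Size = 0x10000, head = 0x100, tail = 0xff00 */
	/* space = 0x100 - (0xff00 + 8) = -0xfe08; after += Size: 0x1f8 */
	/* i.e. 504 bytes remain before the tail would overrun the head */
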
-
-int i915_init_hardware_status(drm_device_t *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_dma_handle_t *dmah;
-
- /* Program Hardware Status Page */
- dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff,1);
-
- if (!dmah) {
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
-
- dev_priv->status_page_dmah = dmah;
- dev_priv->hw_status_page = (void *)dmah->vaddr;
- dev_priv->dma_status_page = dmah->paddr;
-
- (void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- (void) I915_READ(HWS_PGA);
-
- DRM_DEBUG("Enabled hardware status page add 0x%lx read GEM HWS 0x%x\n",dev_priv->hw_status_page, READ_HWSP(dev_priv, 0x20));
- return 0;
-}
-
-void i915_free_hardware_status(drm_device_t *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- if (!I915_NEED_GFX_HWS(dev)) {
- if (dev_priv->status_page_dmah) {
- DRM_DEBUG("free status_page_dmal %x", dev_priv->status_page_dmah);
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- /* Need to rewrite hardware status page */
- I915_WRITE(HWS_PGA, 0x1ffff000);
- }
- } else {
- if (dev_priv->status_gfx_addr) {
- DRM_DEBUG("free status_gfx_addr %x", dev_priv->status_gfx_addr);
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- I915_WRITE(HWS_PGA, 0x1ffff000);
- }
- }
-
-}
-
-void i915_kernel_lost_context(drm_device_t * dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
-}
-
-static int i915_dma_cleanup(drm_device_t * dev)
-{
- drm_i915_private_t *dev_priv =
- (drm_i915_private_t *) dev->dev_private;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- (void) drm_irq_uninstall(dev);
-
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = 0;
- dev_priv->ring.map.handle = 0;
- dev_priv->ring.map.size = 0;
- }
-
-#ifdef I915_HAVE_GEM
- if (I915_NEED_GFX_HWS(dev))
-#endif
- i915_free_hardware_status(dev);
-
- dev_priv->sarea = NULL;
- dev_priv->sarea_priv = NULL;
-
- return 0;
-}
-
-static int i915_initialize(drm_device_t * dev,
- drm_i915_init_t * init)
-{
- drm_i915_private_t *dev_priv =
- (drm_i915_private_t *)dev->dev_private;
-
- DRM_GETSAREA();
- if (!dev_priv->sarea) {
- DRM_ERROR("can not find sarea!\n");
- dev->dev_private = (void *)dev_priv;
- (void) i915_dma_cleanup(dev);
- return (EINVAL);
- }
-
- dev_priv->sarea_priv = (drm_i915_sarea_t *)(uintptr_t)
- ((u8 *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
-
- if (init->ring_size != 0) {
- if (dev_priv->ring.ring_obj != NULL) {
- (void) i915_dma_cleanup(dev);
- DRM_ERROR("Client tried to initialize ringbuffer in "
- "GEM mode\n");
- return -EINVAL;
- }
-
- dev_priv->ring.Size = init->ring_size;
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->ring.map.offset = (u_offset_t)init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- (void) i915_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return (ENOMEM);
- }
- }
-
- dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.dev_addr;
- dev_priv->cpp = init->cpp;
- dev_priv->back_offset = init->back_offset;
- dev_priv->front_offset = init->front_offset;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-
- /* Allow hardware batchbuffers unless told otherwise.
- */
- dev_priv->allow_batchbuffer = 1;
- return 0;
-}
-
-static int i915_dma_resume(drm_device_t * dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- if (!dev_priv->sarea) {
- DRM_ERROR("can not find sarea!\n");
- return (EINVAL);
- }
-
- if (dev_priv->ring.map.handle == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return (ENOMEM);
- }
-
- /* Program Hardware Status Page */
- if (!dev_priv->hw_status_page) {
- DRM_ERROR("Can not find hardware status page\n");
- return (EINVAL);
- }
- DRM_DEBUG("i915_dma_resume hw status page @ %p\n", dev_priv->hw_status_page);
-
- if (!I915_NEED_GFX_HWS(dev))
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- else
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- DRM_DEBUG("Enabled hardware status page\n");
-
- return 0;
-}
-
-/*ARGSUSED*/
-static int i915_dma_init(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_init_t init;
- int retcode = 0;
-
- DRM_COPYFROM_WITH_RETURN(&init, (drm_i915_init_t *)data, sizeof(init));
-
- switch (init.func) {
- case I915_INIT_DMA:
- retcode = i915_initialize(dev, &init);
- break;
- case I915_CLEANUP_DMA:
- retcode = i915_dma_cleanup(dev);
- break;
- case I915_RESUME_DMA:
- retcode = i915_dma_resume(dev);
- break;
- default:
- retcode = EINVAL;
- break;
- }
-
- return retcode;
-}
-
-/* Implement basically the same security restrictions as hardware does
- * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
- *
- * Most of the calculations below involve calculating the size of a
- * particular instruction. It's important to get the size right as
- * that tells us where the next instruction to check is. Any illegal
- * instruction detected will be given a size of zero, which is a
- * signal to abort the rest of the buffer.
- */
-static int do_validate_cmd(int cmd)
-{
- switch (((cmd >> 29) & 0x7)) {
- case 0x0:
- switch ((cmd >> 23) & 0x3f) {
- case 0x0:
- return 1; /* MI_NOOP */
- case 0x4:
- return 1; /* MI_FLUSH */
- default:
- return 0; /* disallow everything else */
- }
-#ifndef __SUNPRO_C
- break;
-#endif
- case 0x1:
- return 0; /* reserved */
- case 0x2:
- return (cmd & 0xff) + 2; /* 2d commands */
- case 0x3:
- if (((cmd >> 24) & 0x1f) <= 0x18)
- return 1;
-
- switch ((cmd >> 24) & 0x1f) {
- case 0x1c:
- return 1;
- case 0x1d:
- switch ((cmd >> 16) & 0xff) {
- case 0x3:
- return (cmd & 0x1f) + 2;
- case 0x4:
- return (cmd & 0xf) + 2;
- default:
- return (cmd & 0xffff) + 2;
- }
- case 0x1e:
- if (cmd & (1 << 23))
- return (cmd & 0xffff) + 1;
- else
- return 1;
- case 0x1f:
- if ((cmd & (1 << 23)) == 0) /* inline vertices */
- return (cmd & 0x1ffff) + 2;
- else if (cmd & (1 << 17)) /* indirect random */
- if ((cmd & 0xffff) == 0)
- return 0; /* unknown length, too hard */
- else
- return (((cmd & 0xffff) + 1) / 2) + 1;
- else
- return 2; /* indirect sequential */
- default:
- return 0;
- }
- default:
- return 0;
- }
-
-#ifndef __SUNPRO_C
- return 0;
-#endif
-}
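
To make the sizing logic concrete, one hypothetical dword as
do_validate_cmd() sees it:

	/* cmd = 0x54000004: (cmd >> 29) & 0x7 == 2, the 2D command class */
	/* so length = (cmd & 0xff) + 2 == 6, and the scan in i915_emit_cmds */
	/* advances 6 dwords to the next opcode; a return of 0 aborts the buffer */
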
-
-static int validate_cmd(int cmd)
-{
- int ret = do_validate_cmd(cmd);
-
-/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
- return ret;
-}
-
-static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
-
- if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) {
- DRM_ERROR(" emit cmds invalid arg");
- return (EINVAL);
- }
- BEGIN_LP_RING((dwords+1)&~1);
-
- for (i = 0; i < dwords;) {
- int cmd, sz;
-
- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) {
- DRM_ERROR("emit cmds failed to get cmd from user");
- return (EINVAL);
- }
-
- if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) {
- DRM_ERROR("emit cmds invalid");
- return (EINVAL);
- }
- OUT_RING(cmd);
-
- while (++i, --sz) {
- if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
- sizeof(cmd))) {
- DRM_ERROR("emit cmds failed get cmds");
- return (EINVAL);
- }
- OUT_RING(cmd);
- }
- }
-
- if (dwords & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-
- return 0;
-}
-
-int i915_emit_box(drm_device_t * dev,
- drm_clip_rect_t __user * boxes,
- int i, int DR1, int DR4)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_clip_rect_t box;
- RING_LOCALS;
-
- if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
- DRM_ERROR("emit box failed to copy from user");
- return (EFAULT);
- }
-
- if (box.y2 <= box.y1 || box.x2 <= box.x1) {
- DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box.x1, box.y1, box.x2, box.y2);
- return (EINVAL);
- }
-
- if (IS_I965G(dev)) {
- BEGIN_LP_RING(4);
- OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
- OUT_RING(DR4);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(DR1);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
- OUT_RING(DR4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- return 0;
-}
-
-/* XXX: Emitting the counter should really be moved to part of the IRQ
- * emit. For now, do it in both places:
- */
-
-void i915_emit_breadcrumb(drm_device_t *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 0;
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
-}
-
-static int i915_dispatch_cmdbuffer(drm_device_t * dev,
- drm_i915_cmdbuffer_t * cmd)
-{
- int nbox = cmd->num_cliprects;
- int i = 0, count, ret;
-
- if (cmd->sz & 0x3) {
- DRM_ERROR("alignment");
- return (EINVAL);
- }
-
- i915_kernel_lost_context(dev);
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- ret = i915_emit_box(dev, cmd->cliprects, i,
- cmd->DR1, cmd->DR4);
- if (ret)
- return ret;
- }
-
- ret = i915_emit_cmds(dev, (int __user *)(void *)cmd->buf, cmd->sz / 4);
- if (ret)
- return ret;
- }
-
- i915_emit_breadcrumb( dev );
- return 0;
-}
-
-static int i915_dispatch_batchbuffer(drm_device_t * dev,
- drm_i915_batchbuffer_t * batch)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_clip_rect_t __user *boxes = batch->cliprects;
- int nbox = batch->num_cliprects;
- int i = 0, count;
- RING_LOCALS;
-
- if ((batch->start | batch->used) & 0x7) {
- DRM_ERROR("alignment");
- return (EINVAL);
- }
-
- i915_kernel_lost_context(dev);
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, boxes, i,
- batch->DR1, batch->DR4);
- if (ret)
- return ret;
- }
-
- if (IS_I830(dev) || IS_845G(dev)) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- OUT_RING(batch->start + batch->used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
- OUT_RING(batch->start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- }
- ADVANCE_LP_RING();
- }
- }
-
- i915_emit_breadcrumb( dev );
-
- return 0;
-}
-
-static int i915_dispatch_flip(struct drm_device * dev, int planes)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- if (!dev_priv->sarea_priv)
- return -EINVAL;
-
- DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
- planes, dev_priv->sarea_priv->pf_current_page);
-
- i915_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(MI_FLUSH | MI_READ_FLUSH);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
- } else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
- }
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
- return 0;
-}
-
-static int i915_quiescent(drm_device_t * dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- i915_kernel_lost_context(dev);
- ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
-
- if (ret)
- {
- i915_kernel_lost_context (dev);
- DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
- dev_priv->ring.head,
- dev_priv->ring.tail,
- dev_priv->ring.space);
- }
- return ret;
-}
-
-/*ARGSUSED*/
-static int i915_flush_ioctl(DRM_IOCTL_ARGS)
-{
- int ret;
- DRM_DEVICE;
-
- LOCK_TEST_WITH_RETURN(dev, fpriv);
-
- spin_lock(&dev->struct_mutex);
- ret = i915_quiescent(dev);
- spin_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/*ARGSUSED*/
-static int i915_batchbuffer(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- dev_priv->sarea_priv;
- drm_i915_batchbuffer_t batch;
- int ret;
-
- if (!dev_priv->allow_batchbuffer) {
- DRM_ERROR("Batchbuffer ioctl disabled\n");
- return (EINVAL);
- }
-
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_i915_batchbuffer32_t batchbuffer32_t;
-
- DRM_COPYFROM_WITH_RETURN(&batchbuffer32_t,
- (void *) data, sizeof (batchbuffer32_t));
-
- batch.start = batchbuffer32_t.start;
- batch.used = batchbuffer32_t.used;
- batch.DR1 = batchbuffer32_t.DR1;
- batch.DR4 = batchbuffer32_t.DR4;
- batch.num_cliprects = batchbuffer32_t.num_cliprects;
- batch.cliprects = (drm_clip_rect_t __user *)
- (uintptr_t)batchbuffer32_t.cliprects;
- } else
- DRM_COPYFROM_WITH_RETURN(&batch, (void *) data,
- sizeof(batch));
-
- DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d, counter %d\n",
- batch.start, batch.used, batch.num_cliprects, dev_priv->counter);
-
- LOCK_TEST_WITH_RETURN(dev, fpriv);
-
-/*
- if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
- batch.num_cliprects *
- sizeof(drm_clip_rect_t)))
- return (EFAULT);
-
-*/
-
- spin_lock(&dev->struct_mutex);
- ret = i915_dispatch_batchbuffer(dev, &batch);
- spin_unlock(&dev->struct_mutex);
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
- return ret;
-}
-
-/*ARGSUSED*/
-static int i915_cmdbuffer(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- dev_priv->sarea_priv;
- drm_i915_cmdbuffer_t cmdbuf;
- int ret;
-
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_i915_cmdbuffer32_t cmdbuffer32_t;
-
- DRM_COPYFROM_WITH_RETURN(&cmdbuffer32_t,
- (drm_i915_cmdbuffer32_t __user *) data,
- sizeof (drm_i915_cmdbuffer32_t));
-
- cmdbuf.buf = (char __user *)(uintptr_t)cmdbuffer32_t.buf;
- cmdbuf.sz = cmdbuffer32_t.sz;
- cmdbuf.DR1 = cmdbuffer32_t.DR1;
- cmdbuf.DR4 = cmdbuffer32_t.DR4;
- cmdbuf.num_cliprects = cmdbuffer32_t.num_cliprects;
- cmdbuf.cliprects = (drm_clip_rect_t __user *)
- (uintptr_t)cmdbuffer32_t.cliprects;
- } else
- DRM_COPYFROM_WITH_RETURN(&cmdbuf, (void *) data,
- sizeof(cmdbuf));
-
- DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
- cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);
-
- LOCK_TEST_WITH_RETURN(dev, fpriv);
-
-/*
- if (cmdbuf.num_cliprects &&
- DRM_VERIFYAREA_READ(cmdbuf.cliprects,
- cmdbuf.num_cliprects *
- sizeof(drm_clip_rect_t))) {
- DRM_ERROR("Fault accessing cliprects\n");
- return (EFAULT);
- }
-*/
-
- spin_lock(&dev->struct_mutex);
- ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
- spin_unlock(&dev->struct_mutex);
- if (ret) {
- DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
- return ret;
- }
-
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
-}
-
-/*ARGSUSED*/
-static int i915_flip_bufs(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_flip_t param;
- int ret;
- DRM_COPYFROM_WITH_RETURN(&param, (drm_i915_flip_t *) data,
- sizeof(param));
-
- DRM_DEBUG("i915_flip_bufs\n");
-
- LOCK_TEST_WITH_RETURN(dev, fpriv);
-
- spin_lock(&dev->struct_mutex);
- ret = i915_dispatch_flip(dev, param.pipes);
- spin_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/*ARGSUSED*/
-static int i915_getparam(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_getparam_t param;
- int value;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
-
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_i915_getparam32_t getparam32_t;
-
- DRM_COPYFROM_WITH_RETURN(&getparam32_t,
- (drm_i915_getparam32_t __user *) data,
- sizeof (drm_i915_getparam32_t));
-
- param.param = getparam32_t.param;
- param.value = (int __user *)(uintptr_t)getparam32_t.value;
- } else
- DRM_COPYFROM_WITH_RETURN(&param,
- (drm_i915_getparam_t *) data, sizeof(param));
-
- switch (param.param) {
- case I915_PARAM_IRQ_ACTIVE:
- value = dev->irq_enabled ? 1 : 0;
- break;
- case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->allow_batchbuffer ? 1 : 0;
- break;
- case I915_PARAM_LAST_DISPATCH:
- value = READ_BREADCRUMB(dev_priv);
- break;
- case I915_PARAM_CHIPSET_ID:
- value = dev->pci_device;
- break;
- case I915_PARAM_HAS_GEM:
- value = dev->driver->use_gem;
- break;
- default:
- DRM_ERROR("Unknown get parameter %d\n", param.param);
- return (EINVAL);
- }
-
- if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
- DRM_ERROR("i915_getparam failed\n");
- return (EFAULT);
- }
- return 0;
-}
-
-/*ARGSUSED*/
-static int i915_setparam(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_setparam_t param;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
-
- DRM_COPYFROM_WITH_RETURN(&param, (drm_i915_setparam_t *) data,
- sizeof(param));
-
- switch (param.param) {
- case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
- break;
- case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- dev_priv->tex_lru_log_granularity = param.value;
- break;
- case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->allow_batchbuffer = param.value;
- break;
- default:
- DRM_ERROR("unknown set parameter %d\n", param.param);
- return (EINVAL);
- }
-
- return 0;
-}
-
-/*ARGSUSED*/
-static int i915_set_status_page(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_hws_addr_t hws;
-
- if (!I915_NEED_GFX_HWS(dev))
- return (EINVAL);
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
- DRM_COPYFROM_WITH_RETURN(&hws, (drm_i915_hws_addr_t __user *) data,
- sizeof(hws));
-DRM_ERROR("i915_set_status_page set status page addr 0x%08x\n", (u32)hws.addr);
-
- dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);
- DRM_DEBUG("set gfx_addr 0x%08x\n", dev_priv->status_gfx_addr);
-
- dev_priv->hws_map.offset =
- (u_offset_t)dev->agp->agp_info.agpi_aperbase + hws.addr;
- dev_priv->hws_map.size = 4 * 1024; /* 4K pages */
- dev_priv->hws_map.type = 0;
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
-
- DRM_DEBUG("set status page: i915_set_status_page: mapoffset 0x%llx\n",
- dev_priv->hws_map.offset);
- drm_core_ioremap(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
- (void) i915_dma_cleanup(dev);
- dev_priv->status_gfx_addr = 0;
- DRM_ERROR("can not ioremap virtual address for"
- " G33 hw status page\n");
- return (ENOMEM);
- }
- dev_priv->hw_status_page = dev_priv->hws_map.dev_addr;
-
- (void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
- DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
- return 0;
-}
-
-/*ARGSUSED*/
-int i915_driver_load(drm_device_t *dev, unsigned long flags)
-{
- struct drm_i915_private *dev_priv;
- unsigned long base, size;
- int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
-
- /* i915 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
- if (dev_priv == NULL)
- return ENOMEM;
-
- (void) memset(dev_priv, 0, sizeof(drm_i915_private_t));
- dev->dev_private = (void *)dev_priv;
- dev_priv->dev = dev;
-
- /* Add register map (needed for suspend/resume) */
-
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
- dev_priv->mmio_map = drm_alloc(sizeof (drm_local_map_t), DRM_MEM_MAPS);
- dev_priv->mmio_map->offset = base;
- dev_priv->mmio_map->size = size;
- dev_priv->mmio_map->type = _DRM_REGISTERS;
- dev_priv->mmio_map->flags = _DRM_REMOVABLE;
- (void) drm_ioremap(dev, dev_priv->mmio_map);
-
- DRM_DEBUG("i915_driverload mmio %p mmio_map->dev_addr %x", dev_priv->mmio_map, dev_priv->mmio_map->dev_addr);
-
-#if defined(__i386)
- dev->driver->use_gem = 0;
-#else
- if (IS_I965G(dev)) {
- dev->driver->use_gem = 1;
- } else {
- dev->driver->use_gem = 0;
- }
-#endif /* __i386 */
-
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-#if defined(__i386)
- if (IS_G4X(dev) || IS_IGDNG(dev) || IS_GM45(dev))
-#else
- if (IS_G4X(dev) || IS_IGDNG(dev))
-#endif
- {
- dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
- dev->driver->get_vblank_counter = gm45_get_vblank_counter;
- }
-
-#ifdef I915_HAVE_GEM
- i915_gem_load(dev);
-#endif
-
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_hardware_status(dev);
-		if (ret)
- return ret;
- }
-
- mutex_init(&dev_priv->user_irq_lock, "userirq", MUTEX_DRIVER, NULL);
- mutex_init(&dev_priv->error_lock, "error_lock", MUTEX_DRIVER, NULL);
-
- ret = drm_vblank_init(dev, I915_NUM_PIPE);
- if (ret) {
- (void) i915_driver_unload(dev);
- return ret;
- }
-
- return ret;
-}
-
-int i915_driver_unload(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- i915_free_hardware_status(dev);
-
- drm_rmmap(dev, dev_priv->mmio_map);
-
- mutex_destroy(&dev_priv->user_irq_lock);
-
- drm_free(dev->dev_private, sizeof(drm_i915_private_t),
- DRM_MEM_DRIVER);
- dev->dev_private = NULL;
-
- return 0;
-}
-
-/*ARGSUSED*/
-int i915_driver_open(drm_device_t * dev, struct drm_file *file_priv)
-{
- struct drm_i915_file_private *i915_file_priv;
-
- DRM_DEBUG("\n");
- i915_file_priv = (struct drm_i915_file_private *)
- drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
-
- if (!i915_file_priv)
-		return (ENOMEM);
-
- file_priv->driver_priv = i915_file_priv;
-
- i915_file_priv->mm.last_gem_seqno = 0;
- i915_file_priv->mm.last_gem_throttle_seqno = 0;
-
- return 0;
-}
-
-void i915_driver_lastclose(drm_device_t * dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- /* agp off can use this to get called before dev_priv */
- if (!dev_priv)
- return;
-
-#ifdef I915_HAVE_GEM
- i915_gem_lastclose(dev);
-#endif
-
- DRM_GETSAREA();
- if (dev_priv->agp_heap)
- i915_mem_takedown(&(dev_priv->agp_heap));
- (void) i915_dma_cleanup(dev);
-}
-
-void i915_driver_preclose(drm_device_t * dev, drm_file_t *fpriv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- i915_mem_release(dev, fpriv, dev_priv->agp_heap);
-}
-
-/*ARGSUSED*/
-void i915_driver_postclose(drm_device_t * dev, struct drm_file *file_priv)
-{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-
- drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
-}
-
-drm_ioctl_desc_t i915_ioctls[] = {
- [DRM_IOCTL_NR(DRM_I915_INIT)] =
- {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_FLUSH)] =
- {i915_flush_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_FLIP)] =
- {i915_flip_bufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] =
- {i915_batchbuffer, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] =
- {i915_irq_emit, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] =
- {i915_irq_wait, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_GETPARAM)] =
- {i915_getparam, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_SETPARAM)] =
- {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_ALLOC)] =
- {i915_mem_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_FREE)] =
- {i915_mem_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] =
- {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] =
- {i915_cmdbuffer, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] =
- {i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] =
- {i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] =
- {i915_vblank_pipe_get, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] =
- {i915_vblank_swap, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] =
- {i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-#ifdef I915_HAVE_GEM
- [DRM_IOCTL_NR(DRM_I915_GEM_INIT)] =
- {i915_gem_init_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_GEM_EXECBUFFER)] =
- {i915_gem_execbuffer, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_GEM_PIN)] =
- {i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_GEM_UNPIN)] =
- {i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_GEM_BUSY)] =
- {i915_gem_busy_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_GEM_THROTTLE)] =
- {i915_gem_throttle_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_GEM_ENTERVT)] =
- {i915_gem_entervt_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_GEM_LEAVEVT)] =
- {i915_gem_leavevt_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_GEM_CREATE)] =
- {i915_gem_create_ioctl, 0},
- [DRM_IOCTL_NR(DRM_I915_GEM_PREAD)] =
- {i915_gem_pread_ioctl, 0},
- [DRM_IOCTL_NR(DRM_I915_GEM_PWRITE)] =
- {i915_gem_pwrite_ioctl, 0},
- [DRM_IOCTL_NR(DRM_I915_GEM_MMAP)] =
- {i915_gem_mmap_ioctl, 0},
- [DRM_IOCTL_NR(DRM_I915_GEM_SET_DOMAIN)] =
- {i915_gem_set_domain_ioctl, 0},
- [DRM_IOCTL_NR(DRM_I915_GEM_SW_FINISH)] =
- {i915_gem_sw_finish_ioctl, 0},
- [DRM_IOCTL_NR(DRM_I915_GEM_SET_TILING)] =
- {i915_gem_set_tiling, 0},
- [DRM_IOCTL_NR(DRM_I915_GEM_GET_TILING)] =
- {i915_gem_get_tiling, 0},
- [DRM_IOCTL_NR(DRM_I915_GEM_GET_APERTURE)] =
- {i915_gem_get_aperture_ioctl, 0},
-#endif
-};
-
-int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
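-
-/*
- * Illustrative sketch (not part of the original source): the generic drm
- * misc module is expected to consume this table by indexing it with the
- * ioctl number and bounds-checking against i915_max_ioctl, roughly:
- *
- *	nr = DRM_IOCTL_NR(cmd);
- *	if (nr >= i915_max_ioctl || i915_ioctls[nr].func == NULL)
- *		return (EINVAL);
- *	return (i915_ioctls[nr].func(...));
- *
- * where "..." stands for the DRM_IOCTL_ARGS; the exact dispatch lives in
- * the drm misc module, and the .func field name is an assumption here.
- */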
-
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always returned to indicate every i9x5 is AGP.
- */
-/*ARGSUSED*/
-int i915_driver_device_is_agp(drm_device_t * dev)
-{
- return 1;
-}
-
diff --git a/usr/src/uts/intel/io/drm/i915_drv.c b/usr/src/uts/intel/io/drm/i915_drv.c
deleted file mode 100644
index 8989e1a..0000000
--- a/usr/src/uts/intel/io/drm/i915_drv.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/* BEGIN CSTYLED */
-
-/*
- * i915_drv.c -- Intel i915 driver -*- linux-c -*-
- * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
- */
-
-/*
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-/*
- * Copyright 2014 RackTop Systems.
- */
-
-/*
- * I915 DRM Driver for Solaris
- *
- * This driver provides the hardware 3D acceleration support for Intel
- * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
- * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
- * means the kernel device driver in DRI.
- *
- * The i915 driver is a device-dependent driver only; it depends on a misc
- * module named drm for generic DRM operations.
- */
-
-#include "drmP.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-#include "drm_pciids.h"
-
-/*
- * copied from vgasubr.h
- */
-
-struct vgaregmap {
- uint8_t *addr;
- ddi_acc_handle_t handle;
- boolean_t mapped;
-};
-
-enum pipe {
- PIPE_A = 0,
- PIPE_B,
-};
-
-
-/*
- * cb_ops entrypoint
- */
-extern struct cb_ops drm_cb_ops;
-
-/*
- * module entrypoint
- */
-static int i915_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
-static int i915_attach(dev_info_t *, ddi_attach_cmd_t);
-static int i915_detach(dev_info_t *, ddi_detach_cmd_t);
-
-
-/* drv_PCI_IDs comes from drm_pciids.h */
-static drm_pci_id_list_t i915_pciidlist[] = {
- i915_PCI_IDS
-};
-
-/*
- * Local routines
- */
-static void i915_configure(drm_driver_t *);
-static int i915_quiesce(dev_info_t *dip);
-
-/*
- * DRM driver
- */
-static drm_driver_t i915_driver = {0};
-
-
-static struct dev_ops i915_dev_ops = {
- DEVO_REV, /* devo_rev */
- 0, /* devo_refcnt */
- i915_info, /* devo_getinfo */
- nulldev, /* devo_identify */
- nulldev, /* devo_probe */
- i915_attach, /* devo_attach */
- i915_detach, /* devo_detach */
- nodev, /* devo_reset */
- &drm_cb_ops, /* devo_cb_ops */
- NULL, /* devo_bus_ops */
- NULL, /* power */
- i915_quiesce, /* devo_quiesce */
-};
-
-static struct modldrv modldrv = {
- &mod_driverops, /* drv_modops */
- "I915 DRM driver", /* drv_linkinfo */
- &i915_dev_ops, /* drv_dev_ops */
-};
-
-static struct modlinkage modlinkage = {
- MODREV_1, (void *) &modldrv, NULL
-};
-
-static ddi_device_acc_attr_t s3_attr = {
- DDI_DEVICE_ATTR_V0,
- DDI_NEVERSWAP_ACC,
- DDI_STRICTORDER_ACC /* must be DDI_STRICTORDER_ACC */
-};
-
-/*
- * softstate head
- */
-static void *i915_statep;
-
-int
-_init(void)
-{
- int error;
-
- i915_configure(&i915_driver);
-
- if ((error = ddi_soft_state_init(&i915_statep,
- sizeof (drm_device_t), DRM_MAX_INSTANCES)) != 0)
- return (error);
-
- if ((error = mod_install(&modlinkage)) != 0) {
- ddi_soft_state_fini(&i915_statep);
- return (error);
- }
-
- return (error);
-
-} /* _init() */
-
-int
-_fini(void)
-{
- int error;
-
- if ((error = mod_remove(&modlinkage)) != 0)
- return (error);
-
- (void) ddi_soft_state_fini(&i915_statep);
-
- return (0);
-
-} /* _fini() */
-
-int
-_info(struct modinfo *modinfop)
-{
- return (mod_info(&modlinkage, modinfop));
-
-} /* _info() */
-
-/*
- * off range: 0x3b0 ~ 0x3ff
- */
-
-static void
-vga_reg_put8(struct vgaregmap *regmap, uint16_t off, uint8_t val)
-{
- ASSERT((off >= 0x3b0) && (off <= 0x3ff));
-
- ddi_put8(regmap->handle, regmap->addr + off, val);
-}
-
-/*
- * off range: 0x3b0 ~ 0x3ff
- */
-static uint8_t
-vga_reg_get8(struct vgaregmap *regmap, uint16_t off)
-{
-
- ASSERT((off >= 0x3b0) && (off <= 0x3ff));
-
- return (ddi_get8(regmap->handle, regmap->addr + off));
-}
-
-static void
-i915_write_indexed(struct vgaregmap *regmap,
- uint16_t index_port, uint16_t data_port, uint8_t index, uint8_t val)
-{
- vga_reg_put8(regmap, index_port, index);
- vga_reg_put8(regmap, data_port, val);
-}
-
-static uint8_t
-i915_read_indexed(struct vgaregmap *regmap,
- uint16_t index_port, uint16_t data_port, uint8_t index)
-{
- vga_reg_put8(regmap, index_port, index);
- return (vga_reg_get8(regmap, data_port));
-}
-
-static void
-i915_write_ar(struct vgaregmap *regmap, uint16_t st01,
- uint8_t reg, uint8_t val, uint8_t palette_enable)
-{
- (void) vga_reg_get8(regmap, st01);
- vga_reg_put8(regmap, VGA_AR_INDEX, palette_enable | reg);
- vga_reg_put8(regmap, VGA_AR_DATA_WRITE, val);
-}
-
-static uint8_t
-i915_read_ar(struct vgaregmap *regmap, uint16_t st01,
- uint8_t index, uint8_t palette_enable)
-{
- (void) vga_reg_get8(regmap, st01);
- vga_reg_put8(regmap, VGA_AR_INDEX, index | palette_enable);
- return (vga_reg_get8(regmap, VGA_AR_DATA_READ));
-}
-
-static int
-i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct s3_i915_private *s3_priv = dev->s3_private;
-
- if (pipe == PIPE_A)
- return (S3_READ(DPLL_A) & DPLL_VCO_ENABLE);
- else
- return (S3_READ(DPLL_B) & DPLL_VCO_ENABLE);
-}
-
-static void
-i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct s3_i915_private *s3_priv = dev->s3_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
- uint32_t *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (pipe == PIPE_A)
- array = s3_priv->save_palette_a;
- else
- array = s3_priv->save_palette_b;
-
- for(i = 0; i < 256; i++)
- array[i] = S3_READ(reg + (i << 2));
-
-}
-
-static void
-i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct s3_i915_private *s3_priv = dev->s3_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
- uint32_t *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (pipe == PIPE_A)
- array = s3_priv->save_palette_a;
- else
- array = s3_priv->save_palette_b;
-
- for(i = 0; i < 256; i++)
- S3_WRITE(reg + (i << 2), array[i]);
-}
-
-static void
-i915_save_vga(struct drm_device *dev)
-{
- struct s3_i915_private *s3_priv = dev->s3_private;
- int i;
- uint16_t cr_index, cr_data, st01;
- struct vgaregmap regmap;
-
- regmap.addr = (uint8_t *)s3_priv->saveAddr;
- regmap.handle = s3_priv->saveHandle;
-
- /* VGA color palette registers */
- s3_priv->saveDACMASK = vga_reg_get8(&regmap, VGA_DACMASK);
- /* DACCRX automatically increments during read */
- vga_reg_put8(&regmap, VGA_DACRX, 0);
- /* Read 3 bytes of color data from each index */
- for (i = 0; i < 256 * 3; i++)
- s3_priv->saveDACDATA[i] = vga_reg_get8(&regmap, VGA_DACDATA);
-
- /* MSR bits */
- s3_priv->saveMSR = vga_reg_get8(&regmap, VGA_MSR_READ);
- if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* CRT controller regs */
- i915_write_indexed(&regmap, cr_index, cr_data, 0x11,
- i915_read_indexed(&regmap, cr_index, cr_data, 0x11) & (~0x80));
- for (i = 0; i <= 0x24; i++)
- s3_priv->saveCR[i] =
- i915_read_indexed(&regmap, cr_index, cr_data, i);
- /* Make sure we don't turn off CR group 0 writes */
- s3_priv->saveCR[0x11] &= ~0x80;
-
- /* Attribute controller registers */
- (void) vga_reg_get8(&regmap, st01);
- s3_priv->saveAR_INDEX = vga_reg_get8(&regmap, VGA_AR_INDEX);
- for (i = 0; i <= 0x14; i++)
- s3_priv->saveAR[i] = i915_read_ar(&regmap, st01, i, 0);
- (void) vga_reg_get8(&regmap, st01);
- vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX);
- (void) vga_reg_get8(&regmap, st01);
-
- /* Graphics controller registers */
- for (i = 0; i < 9; i++)
- s3_priv->saveGR[i] =
- i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i);
-
- s3_priv->saveGR[0x10] =
- i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- s3_priv->saveGR[0x11] =
- i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- s3_priv->saveGR[0x18] =
- i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-
- /* Sequencer registers */
- for (i = 0; i < 8; i++)
- s3_priv->saveSR[i] =
- i915_read_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i);
-}
-
-static void
-i915_restore_vga(struct drm_device *dev)
-{
- struct s3_i915_private *s3_priv = dev->s3_private;
- int i;
- uint16_t cr_index, cr_data, st01;
- struct vgaregmap regmap;
-
- regmap.addr = (uint8_t *)s3_priv->saveAddr;
- regmap.handle = s3_priv->saveHandle;
-
- /*
- * I/O Address Select. This bit selects 3Bxh or 3Dxh as the
- * I/O address for the CRT Controller registers,
- * the Feature Control Register (FCR), and Input Status Register
- * 1 (ST01). Presently ignored (whole range is claimed), but
- * will "ignore" 3Bx for color configuration or 3Dx for monochrome.
- * Note that it is typical in AGP chipsets to shadow this bit
- * and properly steer I/O cycles to the proper bus for operation
- * where a MDA exists on another bus such as ISA.
- * 0 = Select 3Bxh I/O address (MDA emulation) (default).
- * 1 = Select 3Dxh I/O address (CGA emulation).
- */
- vga_reg_put8(&regmap, VGA_MSR_WRITE, s3_priv->saveMSR);
-
- if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* Sequencer registers, don't write SR07 */
- for (i = 0; i < 7; i++)
- i915_write_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i,
- s3_priv->saveSR[i]);
- /* CRT controller regs */
- /* Enable CR group 0 writes */
- i915_write_indexed(&regmap, cr_index, cr_data,
- 0x11, s3_priv->saveCR[0x11]);
- for (i = 0; i <= 0x24; i++)
- i915_write_indexed(&regmap, cr_index,
- cr_data, i, s3_priv->saveCR[i]);
-
- /* Graphics controller regs */
- for (i = 0; i < 9; i++)
- i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i,
- s3_priv->saveGR[i]);
-
- i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- s3_priv->saveGR[0x10]);
- i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- s3_priv->saveGR[0x11]);
- i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- s3_priv->saveGR[0x18]);
-
- /* Attribute controller registers */
- (void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
- for (i = 0; i <= 0x14; i++)
- i915_write_ar(&regmap, st01, i, s3_priv->saveAR[i], 0);
- (void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
- vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX | 0x20);
- (void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
-
- /* VGA color palette registers */
- vga_reg_put8(&regmap, VGA_DACMASK, s3_priv->saveDACMASK);
- /* DACCRX automatically increments during read */
- vga_reg_put8(&regmap, VGA_DACWX, 0);
- /* Read 3 bytes of color data from each index */
- for (i = 0; i < 256 * 3; i++)
- vga_reg_put8(&regmap, VGA_DACDATA, s3_priv->saveDACDATA[i]);
-}
-
-/**
- * i915_save_display - save display & mode info
- * @dev: DRM device
- *
- * Save mode timings and display info.
- */
-void i915_save_display(struct drm_device *dev)
-{
- struct s3_i915_private *s3_priv = dev->s3_private;
-
- /* Display arbitration control */
- s3_priv->saveDSPARB = S3_READ(DSPARB);
-
- /*
- * Pipe & plane A info.
- */
- s3_priv->savePIPEACONF = S3_READ(PIPEACONF);
- s3_priv->savePIPEASRC = S3_READ(PIPEASRC);
- s3_priv->saveFPA0 = S3_READ(FPA0);
- s3_priv->saveFPA1 = S3_READ(FPA1);
- s3_priv->saveDPLL_A = S3_READ(DPLL_A);
- if (IS_I965G(dev))
- s3_priv->saveDPLL_A_MD = S3_READ(DPLL_A_MD);
- s3_priv->saveHTOTAL_A = S3_READ(HTOTAL_A);
- s3_priv->saveHBLANK_A = S3_READ(HBLANK_A);
- s3_priv->saveHSYNC_A = S3_READ(HSYNC_A);
- s3_priv->saveVTOTAL_A = S3_READ(VTOTAL_A);
- s3_priv->saveVBLANK_A = S3_READ(VBLANK_A);
- s3_priv->saveVSYNC_A = S3_READ(VSYNC_A);
- s3_priv->saveBCLRPAT_A = S3_READ(BCLRPAT_A);
-
- s3_priv->saveDSPACNTR = S3_READ(DSPACNTR);
- s3_priv->saveDSPASTRIDE = S3_READ(DSPASTRIDE);
- s3_priv->saveDSPASIZE = S3_READ(DSPASIZE);
- s3_priv->saveDSPAPOS = S3_READ(DSPAPOS);
- s3_priv->saveDSPABASE = S3_READ(DSPABASE);
- if (IS_I965G(dev)) {
- s3_priv->saveDSPASURF = S3_READ(DSPASURF);
- s3_priv->saveDSPATILEOFF = S3_READ(DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
- s3_priv->savePIPEASTAT = S3_READ(PIPEASTAT);
-
- /*
- * Pipe & plane B info
- */
- s3_priv->savePIPEBCONF = S3_READ(PIPEBCONF);
- s3_priv->savePIPEBSRC = S3_READ(PIPEBSRC);
- s3_priv->saveFPB0 = S3_READ(FPB0);
- s3_priv->saveFPB1 = S3_READ(FPB1);
- s3_priv->saveDPLL_B = S3_READ(DPLL_B);
- if (IS_I965G(dev))
- s3_priv->saveDPLL_B_MD = S3_READ(DPLL_B_MD);
- s3_priv->saveHTOTAL_B = S3_READ(HTOTAL_B);
- s3_priv->saveHBLANK_B = S3_READ(HBLANK_B);
- s3_priv->saveHSYNC_B = S3_READ(HSYNC_B);
- s3_priv->saveVTOTAL_B = S3_READ(VTOTAL_B);
- s3_priv->saveVBLANK_B = S3_READ(VBLANK_B);
- s3_priv->saveVSYNC_B = S3_READ(VSYNC_B);
- s3_priv->saveBCLRPAT_A = S3_READ(BCLRPAT_A);
-
- s3_priv->saveDSPBCNTR = S3_READ(DSPBCNTR);
- s3_priv->saveDSPBSTRIDE = S3_READ(DSPBSTRIDE);
- s3_priv->saveDSPBSIZE = S3_READ(DSPBSIZE);
- s3_priv->saveDSPBPOS = S3_READ(DSPBPOS);
- s3_priv->saveDSPBBASE = S3_READ(DSPBBASE);
- if (IS_I965GM(dev) || IS_GM45(dev)) {
- s3_priv->saveDSPBSURF = S3_READ(DSPBSURF);
- s3_priv->saveDSPBTILEOFF = S3_READ(DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
- s3_priv->savePIPEBSTAT = S3_READ(PIPEBSTAT);
-
- /*
- * CRT state
- */
- s3_priv->saveADPA = S3_READ(ADPA);
-
- /*
- * LVDS state
- */
- s3_priv->savePP_CONTROL = S3_READ(PP_CONTROL);
- s3_priv->savePFIT_PGM_RATIOS = S3_READ(PFIT_PGM_RATIOS);
- s3_priv->saveBLC_PWM_CTL = S3_READ(BLC_PWM_CTL);
- if (IS_I965G(dev))
- s3_priv->saveBLC_PWM_CTL2 = S3_READ(BLC_PWM_CTL2);
- if (IS_MOBILE(dev) && !IS_I830(dev))
- s3_priv->saveLVDS = S3_READ(LVDS);
- if (!IS_I830(dev) && !IS_845G(dev))
- s3_priv->savePFIT_CONTROL = S3_READ(PFIT_CONTROL);
- s3_priv->saveLVDSPP_ON = S3_READ(LVDSPP_ON);
- s3_priv->saveLVDSPP_OFF = S3_READ(LVDSPP_OFF);
- s3_priv->savePP_CYCLE = S3_READ(PP_CYCLE);
-
- /* FIXME: save TV & SDVO state */
-
- /* FBC state */
- s3_priv->saveFBC_CFB_BASE = S3_READ(FBC_CFB_BASE);
- s3_priv->saveFBC_LL_BASE = S3_READ(FBC_LL_BASE);
- s3_priv->saveFBC_CONTROL2 = S3_READ(FBC_CONTROL2);
- s3_priv->saveFBC_CONTROL = S3_READ(FBC_CONTROL);
-
- /* VGA state */
- s3_priv->saveVCLK_DIVISOR_VGA0 = S3_READ(VCLK_DIVISOR_VGA0);
- s3_priv->saveVCLK_DIVISOR_VGA1 = S3_READ(VCLK_DIVISOR_VGA1);
- s3_priv->saveVCLK_POST_DIV = S3_READ(VCLK_POST_DIV);
- s3_priv->saveVGACNTRL = S3_READ(VGACNTRL);
-
- i915_save_vga(dev);
-}
-
-void i915_restore_display(struct drm_device *dev)
-{
- struct s3_i915_private *s3_priv = dev->s3_private;
-
- S3_WRITE(DSPARB, s3_priv->saveDSPARB);
-
- /*
- * Pipe & plane A info
- * Prime the clock
- */
- if (s3_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- S3_WRITE(DPLL_A, s3_priv->saveDPLL_A &
- ~DPLL_VCO_ENABLE);
- drv_usecwait(150);
- }
- S3_WRITE(FPA0, s3_priv->saveFPA0);
- S3_WRITE(FPA1, s3_priv->saveFPA1);
- /* Actually enable it */
- S3_WRITE(DPLL_A, s3_priv->saveDPLL_A);
- drv_usecwait(150);
- if (IS_I965G(dev))
- S3_WRITE(DPLL_A_MD, s3_priv->saveDPLL_A_MD);
- drv_usecwait(150);
-
- /* Restore mode */
- S3_WRITE(HTOTAL_A, s3_priv->saveHTOTAL_A);
- S3_WRITE(HBLANK_A, s3_priv->saveHBLANK_A);
- S3_WRITE(HSYNC_A, s3_priv->saveHSYNC_A);
- S3_WRITE(VTOTAL_A, s3_priv->saveVTOTAL_A);
- S3_WRITE(VBLANK_A, s3_priv->saveVBLANK_A);
- S3_WRITE(VSYNC_A, s3_priv->saveVSYNC_A);
- S3_WRITE(BCLRPAT_A, s3_priv->saveBCLRPAT_A);
-
- /* Restore plane info */
- S3_WRITE(DSPASIZE, s3_priv->saveDSPASIZE);
- S3_WRITE(DSPAPOS, s3_priv->saveDSPAPOS);
- S3_WRITE(PIPEASRC, s3_priv->savePIPEASRC);
- S3_WRITE(DSPABASE, s3_priv->saveDSPABASE);
- S3_WRITE(DSPASTRIDE, s3_priv->saveDSPASTRIDE);
- if (IS_I965G(dev)) {
- S3_WRITE(DSPASURF, s3_priv->saveDSPASURF);
- S3_WRITE(DSPATILEOFF, s3_priv->saveDSPATILEOFF);
- }
- S3_WRITE(PIPEACONF, s3_priv->savePIPEACONF);
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- S3_WRITE(DSPACNTR, s3_priv->saveDSPACNTR);
- S3_WRITE(DSPABASE, S3_READ(DSPABASE));
-
- /* Pipe & plane B info */
- if (s3_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- S3_WRITE(DPLL_B, s3_priv->saveDPLL_B &
- ~DPLL_VCO_ENABLE);
- drv_usecwait(150);
- }
- S3_WRITE(FPB0, s3_priv->saveFPB0);
- S3_WRITE(FPB1, s3_priv->saveFPB1);
- /* Actually enable it */
- S3_WRITE(DPLL_B, s3_priv->saveDPLL_B);
- drv_usecwait(150);
- if (IS_I965G(dev))
- S3_WRITE(DPLL_B_MD, s3_priv->saveDPLL_B_MD);
- drv_usecwait(150);
-
- /* Restore mode */
- S3_WRITE(HTOTAL_B, s3_priv->saveHTOTAL_B);
- S3_WRITE(HBLANK_B, s3_priv->saveHBLANK_B);
- S3_WRITE(HSYNC_B, s3_priv->saveHSYNC_B);
- S3_WRITE(VTOTAL_B, s3_priv->saveVTOTAL_B);
- S3_WRITE(VBLANK_B, s3_priv->saveVBLANK_B);
- S3_WRITE(VSYNC_B, s3_priv->saveVSYNC_B);
- S3_WRITE(BCLRPAT_B, s3_priv->saveBCLRPAT_B);
-
- /* Restore plane info */
- S3_WRITE(DSPBSIZE, s3_priv->saveDSPBSIZE);
- S3_WRITE(DSPBPOS, s3_priv->saveDSPBPOS);
- S3_WRITE(PIPEBSRC, s3_priv->savePIPEBSRC);
- S3_WRITE(DSPBBASE, s3_priv->saveDSPBBASE);
- S3_WRITE(DSPBSTRIDE, s3_priv->saveDSPBSTRIDE);
- if (IS_I965G(dev)) {
- S3_WRITE(DSPBSURF, s3_priv->saveDSPBSURF);
- S3_WRITE(DSPBTILEOFF, s3_priv->saveDSPBTILEOFF);
- }
- S3_WRITE(PIPEBCONF, s3_priv->savePIPEBCONF);
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- S3_WRITE(DSPBCNTR, s3_priv->saveDSPBCNTR);
- S3_WRITE(DSPBBASE, S3_READ(DSPBBASE));
-
- /* CRT state */
- S3_WRITE(ADPA, s3_priv->saveADPA);
-
- /* LVDS state */
- if (IS_I965G(dev))
- S3_WRITE(BLC_PWM_CTL2, s3_priv->saveBLC_PWM_CTL2);
- if (IS_MOBILE(dev) && !IS_I830(dev))
- S3_WRITE(LVDS, s3_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev))
- S3_WRITE(PFIT_CONTROL, s3_priv->savePFIT_CONTROL);
-
- S3_WRITE(PFIT_PGM_RATIOS, s3_priv->savePFIT_PGM_RATIOS);
- S3_WRITE(BLC_PWM_CTL, s3_priv->saveBLC_PWM_CTL);
- S3_WRITE(LVDSPP_ON, s3_priv->saveLVDSPP_ON);
- S3_WRITE(LVDSPP_OFF, s3_priv->saveLVDSPP_OFF);
- S3_WRITE(PP_CYCLE, s3_priv->savePP_CYCLE);
- S3_WRITE(PP_CONTROL, s3_priv->savePP_CONTROL);
-
- /* FIXME: restore TV & SDVO state */
-
- /* FBC info */
- S3_WRITE(FBC_CFB_BASE, s3_priv->saveFBC_CFB_BASE);
- S3_WRITE(FBC_LL_BASE, s3_priv->saveFBC_LL_BASE);
- S3_WRITE(FBC_CONTROL2, s3_priv->saveFBC_CONTROL2);
- S3_WRITE(FBC_CONTROL, s3_priv->saveFBC_CONTROL);
-
- /* VGA state */
- S3_WRITE(VGACNTRL, s3_priv->saveVGACNTRL);
- S3_WRITE(VCLK_DIVISOR_VGA0, s3_priv->saveVCLK_DIVISOR_VGA0);
- S3_WRITE(VCLK_DIVISOR_VGA1, s3_priv->saveVCLK_DIVISOR_VGA1);
- S3_WRITE(VCLK_POST_DIV, s3_priv->saveVCLK_POST_DIV);
- drv_usecwait(150);
-
- i915_restore_vga(dev);
-}
-
-static int
-i915_resume(struct drm_device *dev)
-{
- ddi_acc_handle_t conf_hdl;
- struct s3_i915_private *s3_priv = dev->s3_private;
- int i;
-
- if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
- DRM_ERROR(("i915_resume: pci_config_setup fail"));
- return (DDI_FAILURE);
- }
- /*
-	 * The nexus driver will resume pci config space and set the power
-	 * state for its children, so we needn't resume them explicitly
-	 * here.  See pci_pre_resume for details.
- */
- pci_config_put8(conf_hdl, LBB, s3_priv->saveLBB);
-
- if (IS_I965G(dev) && IS_MOBILE(dev))
- S3_WRITE(MCHBAR_RENDER_STANDBY, s3_priv->saveRENDERSTANDBY);
- if (IS_I965GM(dev))
- (void) S3_READ(MCHBAR_RENDER_STANDBY);
-
- S3_WRITE(HWS_PGA, s3_priv->saveHWS);
- if (IS_I965GM(dev))
- (void) S3_READ(HWS_PGA);
-
- i915_restore_display(dev);
-
- /* Clock gating state */
-	S3_WRITE(D_STATE, s3_priv->saveD_STATE);
-	S3_WRITE(CG_2D_DIS, s3_priv->saveCG_2D_DIS);
-
-	/* Cache mode state */
-	S3_WRITE(CACHE_MODE_0, s3_priv->saveCACHE_MODE_0 | 0xffff0000);
-
-	/* Memory arbitration state */
-	S3_WRITE(MI_ARB_STATE, s3_priv->saveMI_ARB_STATE | 0xffff0000);
-
- for (i = 0; i < 16; i++) {
- S3_WRITE(SWF0 + (i << 2), s3_priv->saveSWF0[i]);
- S3_WRITE(SWF10 + (i << 2), s3_priv->saveSWF1[i]);
- }
- for (i = 0; i < 3; i++)
- S3_WRITE(SWF30 + (i << 2), s3_priv->saveSWF2[i]);
-
- S3_WRITE(I915REG_PGTBL_CTRL, s3_priv->pgtbl_ctl);
-
- (void) pci_config_teardown(&conf_hdl);
-
- drm_agp_rebind(dev);
-
- return (DDI_SUCCESS);
-}
-
-static int
-i915_suspend(struct drm_device *dev)
-{
- ddi_acc_handle_t conf_hdl;
- struct s3_i915_private *s3_priv = dev->s3_private;
- int i;
-
- if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
- DRM_ERROR(("i915_suspend: pci_config_setup fail"));
- return (DDI_FAILURE);
- }
-
- /*
- * Nexus driver will resume pci config space for its children.
- * So pci config registers are not saved here.
- */
- s3_priv->saveLBB = pci_config_get8(conf_hdl, LBB);
-
- if (IS_I965G(dev) && IS_MOBILE(dev))
- s3_priv->saveRENDERSTANDBY = S3_READ(MCHBAR_RENDER_STANDBY);
-
- /* Hardware status page */
- s3_priv->saveHWS = S3_READ(HWS_PGA);
-
- i915_save_display(dev);
-
- /* Interrupt state */
- s3_priv->saveIIR = S3_READ(IIR);
- s3_priv->saveIER = S3_READ(IER);
- s3_priv->saveIMR = S3_READ(IMR);
-
- /* Clock gating state */
- s3_priv->saveD_STATE = S3_READ(D_STATE);
- s3_priv->saveCG_2D_DIS = S3_READ(CG_2D_DIS);
-
- /* Cache mode state */
- s3_priv->saveCACHE_MODE_0 = S3_READ(CACHE_MODE_0);
-
- /* Memory Arbitration state */
- s3_priv->saveMI_ARB_STATE = S3_READ(MI_ARB_STATE);
-
- /* Scratch space */
- for (i = 0; i < 16; i++) {
- s3_priv->saveSWF0[i] = S3_READ(SWF0 + (i << 2));
- s3_priv->saveSWF1[i] = S3_READ(SWF10 + (i << 2));
- }
- for (i = 0; i < 3; i++)
- s3_priv->saveSWF2[i] = S3_READ(SWF30 + (i << 2));
-
- /*
- * Save page table control register
- */
- s3_priv->pgtbl_ctl = S3_READ(I915REG_PGTBL_CTRL);
-
- (void) pci_config_teardown(&conf_hdl);
-
- return (DDI_SUCCESS);
-}
-
-/*
- * This function checks the length of the memory-mapped IO space to find
- * the right BAR.  There are two possibilities here:
- * 1. The MMIO registers are in a memory-mapped IO BAR of 1M size.  The
- *    bottom half of the 1M space holds the MMIO registers.
- * 2. The MMIO registers are in a memory-mapped IO BAR of 512K size.  The
- *    whole 512K space holds the MMIO registers.
- * (The probe loop below also accepts a 4M BAR; in every case only the
- * low 512K, which holds the registers, is mapped.)
- */
-static int
-i915_map_regs(dev_info_t *dip, caddr_t *save_addr, ddi_acc_handle_t *handlep)
-{
- int rnumber;
- int nregs;
- off_t size = 0;
-
- if (ddi_dev_nregs(dip, &nregs)) {
- cmn_err(CE_WARN, "i915_map_regs: failed to get nregs");
- return (DDI_FAILURE);
- }
-
- for (rnumber = 1; rnumber < nregs; rnumber++) {
- (void) ddi_dev_regsize(dip, rnumber, &size);
- if ((size == 0x80000) ||
- (size == 0x100000) ||
- (size == 0x400000))
- break;
- }
-
- if (rnumber >= nregs) {
- cmn_err(CE_WARN,
- "i915_map_regs: failed to find MMIO registers");
- return (DDI_FAILURE);
- }
-
- if (ddi_regs_map_setup(dip, rnumber, save_addr,
- 0, 0x80000, &s3_attr, handlep)) {
- cmn_err(CE_WARN,
- "i915_map_regs: failed to map bar %d", rnumber);
- return (DDI_FAILURE);
- }
-
- return (DDI_SUCCESS);
-}
-
-static void
-i915_unmap_regs(ddi_acc_handle_t *handlep)
-{
-	ddi_regs_map_free(handlep);
-}
-
-static int
-i915_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
-{
- drm_device_t *statep;
- s3_i915_private_t *s3_private;
- void *handle;
- int unit;
-
- unit = ddi_get_instance(dip);
- switch (cmd) {
- case DDI_ATTACH:
- break;
- case DDI_RESUME:
- statep = ddi_get_soft_state(i915_statep, unit);
- return (i915_resume(statep));
- default:
- DRM_ERROR("i915_attach: attach and resume ops are supported");
- return (DDI_FAILURE);
-
- }
-
- if (ddi_soft_state_zalloc(i915_statep, unit) != DDI_SUCCESS) {
- cmn_err(CE_WARN,
- "i915_attach: failed to alloc softstate");
- return (DDI_FAILURE);
- }
- statep = ddi_get_soft_state(i915_statep, unit);
- statep->dip = dip;
- statep->driver = &i915_driver;
-
- statep->s3_private = drm_alloc(sizeof(s3_i915_private_t),
- DRM_MEM_DRIVER);
-
- if (statep->s3_private == NULL) {
- cmn_err(CE_WARN, "i915_attach: failed to allocate s3 priv");
- goto err_exit1;
- }
-
- /*
- * Map in the mmio register space for s3.
- */
- s3_private = (s3_i915_private_t *)statep->s3_private;
-
- if (i915_map_regs(dip, &s3_private->saveAddr,
- &s3_private->saveHandle)) {
- cmn_err(CE_WARN, "i915_attach: failed to map MMIO");
- goto err_exit2;
- }
-
- /*
- * Call drm_supp_register to create minor nodes for us
- */
- handle = drm_supp_register(dip, statep);
-	if (handle == NULL) {
- DRM_ERROR("i915_attach: drm_supp_register failed");
- goto err_exit3;
- }
- statep->drm_handle = handle;
-
- /*
- * After drm_supp_register, we can call drm_xxx routine
- */
- statep->drm_supported = DRM_UNSUPPORT;
-	if (drm_probe(statep, i915_pciidlist) != DDI_SUCCESS) {
-		DRM_ERROR("i915_attach: "
-		    "DRM does not currently support this graphics card");
- goto err_exit4;
- }
- statep->drm_supported = DRM_SUPPORT;
-
- /* call common attach code */
- if (drm_attach(statep) != DDI_SUCCESS) {
- DRM_ERROR("i915_attach: drm_attach failed");
- goto err_exit4;
- }
- return (DDI_SUCCESS);
-err_exit4:
- (void) drm_supp_unregister(handle);
-err_exit3:
- i915_unmap_regs(&s3_private->saveHandle);
-err_exit2:
- drm_free(statep->s3_private, sizeof(s3_i915_private_t),
- DRM_MEM_DRIVER);
-err_exit1:
- (void) ddi_soft_state_free(i915_statep, unit);
-
- return (DDI_FAILURE);
-
-} /* i915_attach() */
-
-static int
-i915_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
-{
- drm_device_t *statep;
- int unit;
- s3_i915_private_t *s3_private;
-
- if ((cmd != DDI_SUSPEND) && (cmd != DDI_DETACH)) {
- DRM_ERROR("i915_detach: "
- "only detach and resume ops are supported");
- return (DDI_FAILURE);
- }
-
- unit = ddi_get_instance(dip);
- statep = ddi_get_soft_state(i915_statep, unit);
- if (statep == NULL) {
- DRM_ERROR("i915_detach: can not get soft state");
- return (DDI_FAILURE);
- }
-
- if (cmd == DDI_SUSPEND)
- return (i915_suspend(statep));
-
- s3_private = (s3_i915_private_t *)statep->s3_private;
- ddi_regs_map_free(&s3_private->saveHandle);
-
- /*
- * Free the struct for context saving in S3
- */
- drm_free(statep->s3_private, sizeof(s3_i915_private_t),
- DRM_MEM_DRIVER);
-
- (void) drm_detach(statep);
- (void) drm_supp_unregister(statep->drm_handle);
- (void) ddi_soft_state_free(i915_statep, unit);
-
- return (DDI_SUCCESS);
-
-} /* i915_detach() */
-
-
-/*ARGSUSED*/
-static int
-i915_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
-{
- drm_device_t *statep;
- int error = DDI_SUCCESS;
- int unit;
-
- unit = drm_dev_to_instance((dev_t)arg);
- switch (infocmd) {
- case DDI_INFO_DEVT2DEVINFO:
- statep = ddi_get_soft_state(i915_statep, unit);
- if (statep == NULL || statep->dip == NULL) {
- error = DDI_FAILURE;
- } else {
- *result = (void *) statep->dip;
- error = DDI_SUCCESS;
- }
- break;
- case DDI_INFO_DEVT2INSTANCE:
- *result = (void *)(uintptr_t)unit;
- error = DDI_SUCCESS;
- break;
- default:
- error = DDI_FAILURE;
- break;
- }
- return (error);
-
-} /* i915_info() */
-
-
-static void i915_configure(drm_driver_t *driver)
-{
- driver->buf_priv_size = 1; /* No dev_priv */
- driver->load = i915_driver_load;
- driver->unload = i915_driver_unload;
- driver->open = i915_driver_open;
- driver->preclose = i915_driver_preclose;
- driver->postclose = i915_driver_postclose;
- driver->lastclose = i915_driver_lastclose;
- driver->device_is_agp = i915_driver_device_is_agp;
- driver->enable_vblank = i915_enable_vblank;
- driver->disable_vblank = i915_disable_vblank;
- driver->irq_preinstall = i915_driver_irq_preinstall;
- driver->irq_postinstall = i915_driver_irq_postinstall;
- driver->irq_uninstall = i915_driver_irq_uninstall;
- driver->irq_handler = i915_driver_irq_handler;
-
- driver->gem_init_object = i915_gem_init_object;
- driver->gem_free_object = i915_gem_free_object;
-
- driver->driver_ioctls = i915_ioctls;
- driver->max_driver_ioctl = i915_max_ioctl;
-
- driver->driver_name = DRIVER_NAME;
- driver->driver_desc = DRIVER_DESC;
- driver->driver_date = DRIVER_DATE;
- driver->driver_major = DRIVER_MAJOR;
- driver->driver_minor = DRIVER_MINOR;
- driver->driver_patchlevel = DRIVER_PATCHLEVEL;
-
- driver->use_agp = 1;
- driver->require_agp = 1;
- driver->use_irq = 1;
-}
-
-static int i915_quiesce(dev_info_t *dip)
-{
- drm_device_t *statep;
- int unit;
-
- unit = ddi_get_instance(dip);
- statep = ddi_get_soft_state(i915_statep, unit);
- if (statep == NULL) {
- return (DDI_FAILURE);
- }
- i915_driver_irq_uninstall(statep);
-
- return (DDI_SUCCESS);
-}
diff --git a/usr/src/uts/intel/io/drm/i915_drv.h b/usr/src/uts/intel/io/drm/i915_drv.h
deleted file mode 100644
index ccf4bd6..0000000
--- a/usr/src/uts/intel/io/drm/i915_drv.h
+++ /dev/null
@@ -1,1842 +0,0 @@
-/* BEGIN CSTYLED */
-
-/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
- */
-/*
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _I915_DRV_H
-#define _I915_DRV_H
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
-
-#define DRIVER_NAME "i915"
-#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080730"
-
-#if defined(__SVR4) && defined(__sun)
-#define spinlock_t kmutex_t
-#endif
-
-#define I915_NUM_PIPE 2
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: Add Power Management
- * 1.3: Add vblank support
- * 1.4: Fix cmdbuffer path, add heap destroy
- * 1.5: Add vblank pipe configuration
- * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
- * - Support vertical blank on secondary display pipe
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 6
-#define DRIVER_PATCHLEVEL 0
-
-#if defined(__linux__)
-#define I915_HAVE_FENCE
-#define I915_HAVE_BUFFER
-#endif
-#define I915_HAVE_GEM 1
-
-typedef struct _drm_i915_ring_buffer {
- int tail_mask;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
- struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- int start;
- int size;
- drm_file_t *filp; /* 0: free, -1: heap, other: real files */
-};
-
-typedef struct _drm_i915_vbl_swap {
- struct list_head head;
- drm_drawable_t drw_id;
- unsigned int plane;
- unsigned int sequence;
- int flip;
-} drm_i915_vbl_swap_t;
-
-typedef struct s3_i915_private {
- ddi_acc_handle_t saveHandle;
- caddr_t saveAddr;
- uint32_t pgtbl_ctl;
- uint8_t saveLBB;
- uint32_t saveDSPACNTR;
- uint32_t saveDSPBCNTR;
- uint32_t saveDSPARB;
- uint32_t saveRENDERSTANDBY;
- uint32_t saveHWS;
- uint32_t savePIPEACONF;
- uint32_t savePIPEBCONF;
- uint32_t savePIPEASRC;
- uint32_t savePIPEBSRC;
- uint32_t saveFPA0;
- uint32_t saveFPA1;
- uint32_t saveDPLL_A;
- uint32_t saveDPLL_A_MD;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t saveBCLRPAT_A;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPABASE;
- uint32_t saveDSPASURF;
- uint32_t saveDSPATILEOFF;
- uint32_t savePFIT_PGM_RATIOS;
- uint32_t saveBLC_PWM_CTL;
- uint32_t saveBLC_PWM_CTL2;
- uint32_t saveFPB0;
- uint32_t saveFPB1;
- uint32_t saveDPLL_B;
- uint32_t saveDPLL_B_MD;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t saveBCLRPAT_B;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBBASE;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveVCLK_DIVISOR_VGA0;
- uint32_t saveVCLK_DIVISOR_VGA1;
- uint32_t saveVCLK_POST_DIV;
- uint32_t saveVGACNTRL;
- uint32_t saveADPA;
- uint32_t saveLVDS;
- uint32_t saveLVDSPP_ON;
- uint32_t saveLVDSPP_OFF;
- uint32_t saveDVOA;
- uint32_t saveDVOB;
- uint32_t saveDVOC;
- uint32_t savePP_ON;
- uint32_t savePP_OFF;
- uint32_t savePP_CONTROL;
- uint32_t savePP_CYCLE;
- uint32_t savePFIT_CONTROL;
- uint32_t save_palette_a[256];
- uint32_t save_palette_b[256];
- uint32_t saveFBC_CFB_BASE;
- uint32_t saveFBC_LL_BASE;
- uint32_t saveFBC_CONTROL;
- uint32_t saveFBC_CONTROL2;
- uint32_t saveIER;
- uint32_t saveIIR;
- uint32_t saveIMR;
- uint32_t saveD_STATE;
- uint32_t saveCG_2D_DIS;
- uint32_t saveMI_ARB_STATE;
- uint32_t savePIPEASTAT;
- uint32_t savePIPEBSTAT;
- uint32_t saveCACHE_MODE_0;
- uint32_t saveSWF0[16];
- uint32_t saveSWF1[16];
- uint32_t saveSWF2[3];
- uint8_t saveMSR;
- uint8_t saveSR[8];
- uint8_t saveGR[25];
- uint8_t saveAR_INDEX;
- uint8_t saveAR[21];
- uint8_t saveDACMASK;
- uint8_t saveDACDATA[256*3]; /* 256 3-byte colors */
- uint8_t saveCR[37];
-} s3_i915_private_t;
-
-struct drm_i915_error_state {
- u32 eir;
- u32 pgtbl_er;
- u32 pipeastat;
- u32 pipebstat;
- u32 ipeir;
- u32 ipehr;
- u32 instdone;
- u32 acthd;
- u32 instpm;
- u32 instps;
- u32 instdone1;
- u32 seqno;
- struct timeval time;
-};
-
-typedef struct drm_i915_private {
- struct drm_device *dev;
-
- drm_local_map_t *sarea;
- drm_local_map_t *mmio_map;
-
- drm_i915_sarea_t *sarea_priv;
- drm_i915_ring_buffer_t ring;
-
- drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
- dma_addr_t dma_status_page;
- uint32_t counter;
- unsigned int status_gfx_addr;
- drm_local_map_t hws_map;
- struct drm_gem_object *hws_obj;
-
- unsigned int cpp;
- int back_offset;
- int front_offset;
- int current_page;
- int page_flipping;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- /** Protects user_irq_refcount and irq_mask_reg */
- spinlock_t user_irq_lock;
- /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
- int user_irq_refcount;
- /** Cached value of IMR to avoid reads in updating the bitfield */
- int irq_mask_reg;
- uint32_t pipestat[2];
-	/** split irq regs for graphics and display engine on IGDNG;
-	    irq_mask_reg is still used for display irq. */
- u32 gt_irq_mask_reg;
- u32 gt_irq_enable_reg;
- u32 de_irq_enable_reg;
-
- int tex_lru_log_granularity;
- int allow_batchbuffer;
- struct mem_block *agp_heap;
- unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- int vblank_pipe;
-
- spinlock_t error_lock;
- struct drm_i915_error_state *first_error;
-
- struct {
- struct drm_mm gtt_space;
-
- drm_local_map_t gtt_mapping;
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- uint32_t next_gem_seqno;
-
- /**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
-
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
- /**
- * Flag if the hardware appears to be wedged.
- *
-	 * This is set when attempts to idle the device time out.
-	 * It prevents command submission from occurring and makes
-	 * every pending request fail.
- */
- int wedged;
-
- /** Bit 6 swizzling required for X tiling */
- uint32_t bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- uint32_t bit_6_swizzle_y;
- } mm;
-
-} drm_i915_private_t;
-
-struct drm_track {
- struct drm_track *next, *prev;
- caddr_t contain_ptr;
- struct drm_gem_object *obj;
- uint32_t name;
- uint64_t offset;
-
-};
-
-/** driver private structure attached to each drm_gem_object */
-struct drm_i915_gem_object {
- /** This object's place on the active/flushing/inactive lists */
- struct list_head list;
-
- struct drm_gem_object *obj;
-
- /** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node *gtt_space;
-
- /**
- * This is set if the object is on the active or flushing lists
- * (has pending rendering), and is not set if it's on inactive (ready
- * to be unbound).
- */
- int active;
-
- /**
- * This is set if the object has been written to since last bound
- * to the GTT
- */
- int dirty;
-
- /** AGP memory structure for our GTT binding. */
- int agp_mem;
-
- caddr_t *page_list;
-
- pfn_t *pfnarray;
- /**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
- */
- uint32_t gtt_offset;
-
- /** Boolean whether this object has a valid gtt offset. */
- int gtt_bound;
-
- /** How many users have pinned this object in GTT space */
- int pin_count;
-
- /** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
-
- /** Current tiling mode for the object. */
- uint32_t tiling_mode;
- uint32_t stride;
- /**
- * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
- * GEM_DOMAIN_CPU is not in the object's read domain.
- */
- uint8_t *page_cpu_valid;
- /** User space pin count and filp owning the pin */
- uint32_t user_pin_count;
- struct drm_file *pin_filp;
- /**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- int in_execbuffer;
-};
-
-/**
- * Request queue structure.
- *
- * The request queue allows us to note sequence numbers that have been emitted
- * and may be associated with active buffers to be retired.
- *
- * By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
- * an emission time with seqnos for tracking how far ahead of the GPU we are.
- */
-struct drm_i915_gem_request {
- struct list_head list;
-
- /** GEM sequence number associated with this request. */
- uint32_t seqno;
-
- /** Time at which this request was emitted, in jiffies. */
- unsigned long emitted_jiffies;
-
- /** Cache domains that were flushed at the start of the request. */
- uint32_t flush_domains;
-
-};
-
-struct drm_i915_file_private {
- struct {
- uint32_t last_gem_seqno;
- uint32_t last_gem_throttle_seqno;
- } mm;
-};
-
-
-enum intel_chip_family {
- CHIP_I8XX = 0x01,
- CHIP_I9XX = 0x02,
- CHIP_I915 = 0x04,
- CHIP_I965 = 0x08,
-};
-
-extern drm_ioctl_desc_t i915_ioctls[];
-extern int i915_max_ioctl;
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
-
- /* i915_dma.c */
-extern void i915_kernel_lost_context(drm_device_t * dev);
-extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern int i915_driver_unload(struct drm_device *dev);
-extern int i915_driver_open(drm_device_t * dev, drm_file_t *file_priv);
-extern void i915_driver_lastclose(drm_device_t * dev);
-extern void i915_driver_preclose(drm_device_t * dev, drm_file_t *filp);
-extern void i915_driver_postclose(drm_device_t * dev,
- struct drm_file *file_priv);
-extern int i915_driver_device_is_agp(drm_device_t * dev);
-extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
-extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect __user *boxes,
- int i, int DR1, int DR4);
-extern void i915_emit_breadcrumb(struct drm_device *dev);
-extern void i915_emit_mi_flush(drm_device_t *dev, uint32_t flush);
-extern void i915_handle_error(struct drm_device *dev);
-
-/* i915_irq.c */
-extern int i915_irq_emit(DRM_IOCTL_ARGS);
-extern int i915_irq_wait(DRM_IOCTL_ARGS);
-
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern int i915_driver_irq_preinstall(drm_device_t * dev);
-extern void i915_driver_irq_postinstall(drm_device_t * dev);
-extern void i915_driver_irq_uninstall(drm_device_t * dev);
-extern int i915_emit_irq(drm_device_t * dev);
-extern int i915_vblank_swap(DRM_IOCTL_ARGS);
-extern void i915_user_irq_on(drm_device_t * dev);
-extern void i915_user_irq_off(drm_device_t * dev);
-extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
-extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
-
-/* i915_mem.c */
-extern int i915_mem_alloc(DRM_IOCTL_ARGS);
-extern int i915_mem_free(DRM_IOCTL_ARGS);
-extern int i915_mem_init_heap(DRM_IOCTL_ARGS);
-extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
-extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(drm_device_t * dev,
- drm_file_t *filp, struct mem_block *heap);
-extern struct mem_block **get_heap(drm_i915_private_t *, int);
-extern struct mem_block *find_block_by_proc(struct mem_block *, drm_file_t *);
-extern void mark_block(drm_device_t *, struct mem_block *, int);
-extern void free_block(struct mem_block *);
-
-/* i915_gem.c */
-int i915_gem_init_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_create_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_pread_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_pwrite_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_mmap_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_set_domain_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_sw_finish_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_execbuffer(DRM_IOCTL_ARGS);
-int i915_gem_pin_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_unpin_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_busy_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_throttle_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_entervt_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_leavevt_ioctl(DRM_IOCTL_ARGS);
-int i915_gem_set_tiling(DRM_IOCTL_ARGS);
-int i915_gem_get_tiling(DRM_IOCTL_ARGS);
-int i915_gem_get_aperture_ioctl(DRM_IOCTL_ARGS);
-void i915_gem_load(struct drm_device *dev);
-int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj, uint32_t type);
-void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
-void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(void *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
-
-/* i915_gem_tiling.c */
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-
-/* i915_gem_debug.c */
-void i915_gem_command_decode(uint32_t *data, int count,
- uint32_t hw_offset, struct drm_device *dev);
-/* i915_gem_regdump.c */
-int i915_reg_dump_show(struct drm_device *dev, void *v);
-#ifdef I915_HAVE_FENCE
-/* i915_fence.c */
-
-
-extern void i915_fence_handler(drm_device_t *dev);
-extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t class,
- uint32_t flags,
- uint32_t *sequence,
- uint32_t *native_type);
-extern void i915_poke_flush(drm_device_t *dev, uint32_t class);
-extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags);
-#endif
-
-#ifdef I915_HAVE_BUFFER
-/* i915_buffer.c */
-extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
-extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type);
-extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
-extern int i915_init_mem_type(drm_device_t *dev, uint32_t type,
- drm_mem_type_manager_t *man);
-extern uint32_t i915_evict_mask(drm_buffer_object_t *bo);
-extern int i915_move(drm_buffer_object_t *bo, int evict,
- int no_wait, drm_bo_mem_reg_t *new_mem);
-
-#endif
-
-#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
-#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
-#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
-#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
-#define S3_READ(reg) \
- *(uint32_t volatile *)((uintptr_t)s3_priv->saveAddr + (reg))
-#define S3_WRITE(reg, val) \
- *(uint32_t volatile *)((uintptr_t)s3_priv->saveAddr + (reg)) = (val)
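-
-/*
- * Usage sketch (illustrative; both patterns appear in this driver):
- * I915_READ/I915_WRITE go through the MMIO map set up in
- * i915_driver_load() and assume a dev_priv in scope, while
- * S3_READ/S3_WRITE reach the same register file through the separate
- * suspend/resume mapping and assume an s3_priv in scope:
- *
- *	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- *	if (S3_READ(DPLL_A) & DPLL_VCO_ENABLE)
- *		...;
- */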
-
-#define I915_VERBOSE 0
-#define I915_RING_VALIDATE 0
-
-#if I915_RING_VALIDATE
-void i915_ring_validate(struct drm_device *dev, const char *func, int line);
-#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
-#else
-#define I915_RING_DO_VALIDATE(dev)
-#endif
-
-#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile unsigned char *virt;
-
-#if I915_VERBOSE
-#define BEGIN_LP_RING(n) do { \
- DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
- DRM_DEBUG("dev_priv->ring.virtual_start (%lx)\n", (dev_priv->ring.virtual_start)); \
- I915_RING_DO_VALIDATE(dev); \
- if (dev_priv->ring.space < (n)*4) \
- (void) i915_wait_ring(dev, (n)*4, __FUNCTION__); \
- outcount = 0; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (*"\0")
-#else
-#define BEGIN_LP_RING(n) do { \
- I915_RING_DO_VALIDATE(dev); \
- if (dev_priv->ring.space < (n)*4) \
- (void) i915_wait_ring(dev, (n)*4, __FUNCTION__); \
- outcount = 0; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (*"\0")
-#endif
-
-#if I915_VERBOSE
-#define OUT_RING(n) do { \
- DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(void *)(virt + outring) = (n); \
- outcount++; \
- outring += 4; \
- outring &= ringmask; \
-} while (*"\0")
-#else
-#define OUT_RING(n) do { \
- *(volatile unsigned int *)(void *)(virt + outring) = (n); \
- outcount++; \
- outring += 4; \
- outring &= ringmask; \
-} while (*"\0")
-#endif
-
-#if I915_VERBOSE
-#define ADVANCE_LP_RING() do { \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
- I915_RING_DO_VALIDATE(dev); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I915_WRITE(PRB0_TAIL, outring); \
-} while (*"\0")
-#else
-#define ADVANCE_LP_RING() do { \
- I915_RING_DO_VALIDATE(dev); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I915_WRITE(PRB0_TAIL, outring); \
-} while (*"\0")
-#endif
-
-extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
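
The three macros above form a small emission protocol: RING_LOCALS declares the
cursor variables, BEGIN_LP_RING reserves n dwords (calling i915_wait_ring when
the ring is short on space), OUT_RING stores one dword and advances the wrapped
cursor, and ADVANCE_LP_RING publishes the new tail through PRB0_TAIL. A hedged
sketch of a caller, using only names from this header (the function name is
illustrative; MI_FLUSH is defined further down, and the same two-dword shape is
used by the request code in i915_gem.c):

/*
 * Illustrative sketch: emit one MI_FLUSH plus a no-op pad.  The ring
 * macros expect dev_priv to be in scope.
 */
static void
i915_emit_flush_sketch(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(2);	/* reserve two dwords, waiting if full */
	OUT_RING(MI_FLUSH);	/* flush command */
	OUT_RING(0);		/* no-op pad keeps the dword count even */
	ADVANCE_LP_RING();	/* write the new tail to PRB0_TAIL */
}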
-
-/* Extended config space */
-#define LBB 0xf4
-#define GDRST 0xc0
-#define GDRST_FULL (0<<2)
-#define GDRST_RENDER (1<<2)
-#define GDRST_MEDIA (3<<2)
-
-/* VGA stuff */
-
-#define VGA_ST01_MDA 0x3ba
-#define VGA_ST01_CGA 0x3da
-
-#define VGA_MSR_WRITE 0x3c2
-#define VGA_MSR_READ 0x3cc
-#define VGA_MSR_MEM_EN (1<<1)
-#define VGA_MSR_CGA_MODE (1<<0)
-
-#define VGA_SR_INDEX 0x3c4
-#define VGA_SR_DATA 0x3c5
-
-#define VGA_AR_INDEX 0x3c0
-#define VGA_AR_VID_EN (1<<5)
-#define VGA_AR_DATA_WRITE 0x3c0
-#define VGA_AR_DATA_READ 0x3c1
-
-#define VGA_GR_INDEX 0x3ce
-#define VGA_GR_DATA 0x3cf
-/* GR05 */
-#define VGA_GR_MEM_READ_MODE_SHIFT 3
-#define VGA_GR_MEM_READ_MODE_PLANE 1
-/* GR06 */
-#define VGA_GR_MEM_MODE_MASK 0xc
-#define VGA_GR_MEM_MODE_SHIFT 2
-#define VGA_GR_MEM_A0000_AFFFF 0
-#define VGA_GR_MEM_A0000_BFFFF 1
-#define VGA_GR_MEM_B0000_B7FFF 2
-#define VGA_GR_MEM_B0000_BFFFF 3
-
-#define VGA_DACMASK 0x3c6
-#define VGA_DACRX 0x3c7
-#define VGA_DACWX 0x3c8
-#define VGA_DACDATA 0x3c9
-
-#define VGA_CR_INDEX_MDA 0x3b4
-#define VGA_CR_DATA_MDA 0x3b5
-#define VGA_CR_INDEX_CGA 0x3d4
-#define VGA_CR_DATA_CGA 0x3d5
-
-
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-
-#define INST_PARSER_CLIENT 0x00000000
-#define INST_OP_FLUSH 0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
-#define MI_USER_INTERRUPT MI_INSTR(2, (0 << 29))
-#define MI_FLUSH (0x04 << 23)
-#define MI_NO_WRITE_FLUSH (1 << 2)
-#define MI_READ_FLUSH (1 << 0)
-#define MI_EXE_FLUSH (1 << 1)
-#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
-#define MI_STORE_DWORD_INDEX_SHIFT 2
-
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
-
-#define I915REG_PGTBL_CTRL 0x2020
-#define IPEIR 0x02088
-#define HWSTAM 0x02098
-#define IIR 0x020a4
-#define IMR 0x020a8
-#define IER 0x020a0
-#define INSTPM 0x020c0
-#define ACTHD 0x020c8
-#define PIPEASTAT 0x70024
-#define PIPEBSTAT 0x71024
-#define ACTHD_I965 0x02074
-#define HWS_PGA 0x02080
-#define IPEIR_I965 0x02064
-#define IPEHR_I965 0x02068
-#define INSTDONE_I965 0x0206c
-#define INSTPS 0x02070 /* 965+ only */
-#define INSTDONE1 0x0207c /* 965+ only */
-#define IPEHR 0x0208c
-#define INSTDONE 0x02090
-#define EIR 0x020b0
-#define EMR 0x020b4
-#define ESR 0x020b8
-#define GM45_ERROR_PAGE_TABLE (1<<5)
-#define GM45_ERROR_MEM_PRIV (1<<4)
-#define I915_ERROR_PAGE_TABLE (1<<4)
-#define GM45_ERROR_CP_PRIV (1<<3)
-#define I915_ERROR_MEMORY_REFRESH (1<<1)
-#define I915_ERROR_INSTRUCTION (1<<0)
-
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define I915_VBLANK_CLEAR (1UL<<1)
-
-#define SRX_INDEX 0x3c4
-#define SRX_DATA 0x3c5
-#define SR01 1
-#define SR01_SCREEN_OFF (1<<5)
-
-#define PPCR 0x61204
-#define PPCR_ON (1<<0)
-
-#define DVOB 0x61140
-#define DVOB_ON (1<<31)
-#define DVOC 0x61160
-#define DVOC_ON (1<<31)
-#define LVDS 0x61180
-#define LVDS_ON (1<<31)
-
-#define ADPA 0x61100
-#define ADPA_DPMS_MASK (~(3<<10))
-#define ADPA_DPMS_ON (0<<10)
-#define ADPA_DPMS_SUSPEND (1<<10)
-#define ADPA_DPMS_STANDBY (2<<10)
-#define ADPA_DPMS_OFF (3<<10)
-
-#ifdef NOPID
-#undef NOPID
-#endif
-#define NOPID 0x2094
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define TAIL_ADDR 0x001FFFF8
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-#define PGTBL_ER 0x02024
-#define PRB0_TAIL 0x02030
-#define PRB0_HEAD 0x02034
-#define PRB0_START 0x02038
-#define PRB0_CTL 0x0203c
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-
-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-
-#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-
-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
-#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
-
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
-#define MI_BATCH_NON_SECURE (1)
-
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
-
-#define MI_WAIT_FOR_EVENT ((0x3<<23))
-#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-
-#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
-
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define ASYNC_FLIP (1<<22)
-#define DISPLAY_PLANE_A (0<<20)
-#define DISPLAY_PLANE_B (1<<20)
-
-#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
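
READ_BREADCRUMB is the consumer side of the breadcrumb scheme the comment above
describes: an MI_STORE_DWORD_INDEX command stores the most recently completed
sequence number into dword I915_BREADCRUMB_INDEX of the status page, and the
CPU polls it back out. A hedged sketch of the wrap-safe comparison typically
used when polling (the helper name is illustrative):

/*
 * Illustrative sketch: nonzero once the breadcrumb in the status page
 * has reached seqno.  The (int32_t) cast keeps the comparison correct
 * when the 32-bit sequence counter wraps around.
 */
static int
i915_seqno_passed_sketch(drm_i915_private_t *dev_priv, uint32_t seqno)
{
	return ((int32_t)(READ_BREADCRUMB(dev_priv) - seqno)) >= 0;
}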
-
-/*
- * Registers added for S3 (suspend/resume) support
- */
-#define DPLL_A 0x06014
-#define DPLL_B 0x06018
-# define DPLL_VCO_ENABLE 0x80000000 /* (1 << 31) */
-# define DPLL_DVO_HIGH_SPEED (1 << 30)
-# define DPLL_SYNCLOCK_ENABLE (1 << 29)
-# define DPLL_VGA_MODE_DIS (1 << 28)
-# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
-# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
-# define DPLL_MODE_MASK (3 << 26)
-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
-# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
-# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
-# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-
-/**
- * The i830 generation, in DAC/serial mode, defines p1 as two plus this
- * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
- */
-# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
-/**
- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
- * this field (only one bit may be set).
- */
-# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
-# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
-# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
-# define PLL_REF_INPUT_DREFCLK (0 << 13)
-# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
-# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
-# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
-# define PLL_REF_INPUT_MASK (3 << 13)
-# define PLL_LOAD_PULSE_PHASE_SHIFT 9
-
-/* IGDNG */
-#define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
-#define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
-#define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
-#define DPLL_FPA1_P1_POST_DIV_SHIFT 0
-#define DPLL_FPA1_P1_POST_DIV_MASK 0xff
-
-/*
- * Parallel to Serial Load Pulse phase selection.
- * Selects the phase for the 10X DPLL clock for the PCIe
- * digital display port. The range is 4 to 13; 10 or more
- * is just a flip delay. The default is 6.
- */
-# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
-# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
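
Programming the load-pulse phase described above is a plain read-modify-write:
clear the four-bit field with the mask, then insert the new value at the shift.
A hedged sketch using the documented default of 6 (dev_priv is assumed to be in
scope, as the I915_READ/I915_WRITE macros require):

/* Illustrative sketch: set the load-pulse phase of DPLL A to 6. */
uint32_t dpll = I915_READ(DPLL_A);
dpll &= ~PLL_LOAD_PULSE_PHASE_MASK;
dpll |= 6 << PLL_LOAD_PULSE_PHASE_SHIFT;
I915_WRITE(DPLL_A, dpll);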
-
-/**
- * SDVO multiplier for 945G/GM. Not used on 965.
- *
- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
- */
-# define SDVO_MULTIPLIER_MASK 0x000000ff
-# define SDVO_MULTIPLIER_SHIFT_HIRES 4
-# define SDVO_MULTIPLIER_SHIFT_VGA 0
-
-/** @defgroup DPLL_MD
- * @{
- */
-/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
-#define DPLL_A_MD 0x0601c
-/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
-#define DPLL_B_MD 0x06020
-/**
- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
- *
- * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
- */
-# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
-# define DPLL_MD_UDI_DIVIDER_SHIFT 24
-/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
-# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
-# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
-/**
- * SDVO/UDI pixel multiplier.
- *
- * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
- * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
- * dummy bytes in the datastream at an increased clock rate, with both sides of
- * the link knowing how many bytes are filler.
- *
- * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
- * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
- * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
- * through an SDVO command.
- *
- * This register field has values of multiplication factor minus 1, with
- * a maximum multiplier of 5 for SDVO.
- */
-# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
-# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
-/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
- * This must be set to the default value (3) or the CRT won't work; the
- * exact function of this field is not fully understood.
- */
-# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
-# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-/** @} */
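
The multiplier comment above reduces to a small search: with the bus running at
10 times the DPLL clock, pick the smallest multiplier that lifts dotclock times
multiplier to at least 100 MHz (1 GHz on the bus), capped at SDVO's maximum of
5. A hedged sketch (the helper name is illustrative; dotclock in kHz):

/*
 * Illustrative sketch: choose the SDVO pixel multiplier for a mode.
 * For the 65 MHz example above this returns 2 (130 MHz, 1.30 GHz on
 * the bus).  The DPLL_MD field then takes the result minus one,
 * shifted by DPLL_MD_UDI_MULTIPLIER_SHIFT.
 */
static int
sdvo_pixel_multiply_sketch(int dotclock_khz)
{
	int mult;

	for (mult = 1; mult <= 5; mult++) {
		if (dotclock_khz * mult >= 100000)
			return mult;
	}
	return 5;	/* clock too low even at the maximum multiplier */
}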
-
-#define DPLL_TEST 0x606c
-# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
-# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
-# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
-# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
-# define DPLLB_TEST_N_BYPASS (1 << 19)
-# define DPLLB_TEST_M_BYPASS (1 << 18)
-# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
-# define DPLLA_TEST_N_BYPASS (1 << 3)
-# define DPLLA_TEST_M_BYPASS (1 << 2)
-# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
-
-/*
- * Palette registers
- */
-#define PALETTE_A 0x0a000
-#define PALETTE_B 0x0a800
-
-/* MCH MMIO space */
-
-/*
- * MCHBAR mirror.
- *
- * This mirrors the MCHBAR MMIO space whose location is determined by
- * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
- * every way. It is not accessible from the CP register read instructions.
- *
- */
-#define MCHBAR_MIRROR_BASE 0x10000
-
-/** 915-945 and GM965 MCH register controlling DRAM channel access */
-#define DCC 0x10200
-#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
-#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
-#define DCC_ADDRESSING_MODE_MASK (3 << 0)
-#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
-#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
-
-/** 965 MCH register controlling DRAM channel configuration */
-#define C0DRB3 0x10206
-#define C1DRB3 0x10606
-
-/** GM965 GM45 render standby register */
-#define MCHBAR_RENDER_STANDBY 0x111B8
-
-#define FPA0 0x06040
-#define FPA1 0x06044
-#define FPB0 0x06048
-#define FPB1 0x0604c
-
-#define D_STATE 0x6104
-#define CG_2D_DIS 0x6200
-#define CG_3D_DIS 0x6204
-
-#define MI_ARB_STATE 0x20e4
-
-/*
- * Cache mode 0 reg.
- * - Manipulating render cache behaviour is central
- * to the concept of zone rendering; tuning this reg can help avoid
- * unnecessary render cache reads and even writes (for z/stencil)
- * at the beginning and end of a scene.
- *
- * - To change a bit, write to this reg with a mask bit set and the
- * bit of interest either set or cleared. E.g. (BIT<<16) | BIT to set.
- */
-#define CACHE_MODE_0 0x2120
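
Because each write to CACHE_MODE_0 carries its own enable mask in bits 31:16,
no read-modify-write cycle is needed; the hardware only latches bits whose mask
bit is set. A hedged sketch of the convention the comment describes (BIT is a
placeholder for whichever cache-mode bit is being tuned, and dev_priv is
assumed in scope for I915_WRITE):

#define CACHE_MODE_SET(bit)	I915_WRITE(CACHE_MODE_0, ((bit) << 16) | (bit))
#define CACHE_MODE_CLEAR(bit)	I915_WRITE(CACHE_MODE_0, ((bit) << 16))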
-
-/* I830 CRTC registers */
-#define HTOTAL_A 0x60000
-#define HBLANK_A 0x60004
-#define HSYNC_A 0x60008
-#define VTOTAL_A 0x6000c
-#define VBLANK_A 0x60010
-#define VSYNC_A 0x60014
-#define PIPEASRC 0x6001c
-#define BCLRPAT_A 0x60020
-#define VSYNCSHIFT_A 0x60028
-
-#define HTOTAL_B 0x61000
-#define HBLANK_B 0x61004
-#define HSYNC_B 0x61008
-#define VTOTAL_B 0x6100c
-#define VBLANK_B 0x61010
-#define VSYNC_B 0x61014
-#define PIPEBSRC 0x6101c
-#define BCLRPAT_B 0x61020
-#define VSYNCSHIFT_B 0x61028
-
-#define DSPACNTR 0x70180
-#define DSPBCNTR 0x71180
-#define DISPLAY_PLANE_ENABLE (1<<31)
-#define DISPLAY_PLANE_DISABLE 0
-#define DISPPLANE_GAMMA_ENABLE (1<<30)
-#define DISPPLANE_GAMMA_DISABLE 0
-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_STEREO_ENABLE (1<<25)
-#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
-#define DISPPLANE_SEL_PIPE_A 0
-#define DISPPLANE_SEL_PIPE_B (1<<24)
-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
-#define DISPPLANE_SRC_KEY_DISABLE 0
-#define DISPPLANE_LINE_DOUBLE (1<<20)
-#define DISPPLANE_NO_LINE_DOUBLE 0
-#define DISPPLANE_STEREO_POLARITY_FIRST 0
-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-/* plane B only */
-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
-#define DISPPLANE_ALPHA_TRANS_DISABLE 0
-#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
-#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-
-#define DSPABASE 0x70184
-#define DSPASTRIDE 0x70188
-
-#define DSPBBASE 0x71184
-#define DSPBADDR DSPBBASE
-#define DSPBSTRIDE 0x71188
-
-#define DSPAKEYVAL 0x70194
-#define DSPAKEYMASK 0x70198
-
-#define DSPAPOS 0x7018C /* reserved */
-#define DSPASIZE 0x70190
-#define DSPBPOS 0x7118C
-#define DSPBSIZE 0x71190
-
-#define DSPASURF 0x7019C
-#define DSPATILEOFF 0x701A4
-
-#define DSPBSURF 0x7119C
-#define DSPBTILEOFF 0x711A4
-
-#define PIPEACONF 0x70008
-#define PIPEACONF_ENABLE (1UL<<31)
-#define PIPEACONF_DISABLE 0
-#define PIPEACONF_DOUBLE_WIDE (1<<30)
-#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPEACONF_SINGLE_WIDE 0
-#define PIPEACONF_PIPE_UNLOCKED 0
-#define PIPEACONF_PIPE_LOCKED (1<<25)
-#define PIPEACONF_PALETTE 0
-#define PIPEACONF_GAMMA (1<<24)
-#define PIPECONF_FORCE_BORDER (1<<25)
-#define PIPECONF_PROGRESSIVE (0 << 21)
-#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
-
-#define PIPEBCONF 0x71008
-#define PIPEBCONF_ENABLE (1UL<<31)
-#define PIPEBCONF_DISABLE 0
-#define PIPEBCONF_DOUBLE_WIDE (1<<30)
-#define PIPEBCONF_DISABLE 0
-#define PIPEBCONF_GAMMA (1<<24)
-#define PIPEBCONF_PALETTE 0
-
-#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
-
-#define BLC_PWM_CTL2 0x61250
-
-#define PFIT_CONTROL 0x61230
-#define PFIT_PGM_RATIOS 0x61234
-
-/**
- * Indicates that all dependencies of the panel are on:
- *
- * - PLL enabled
- * - pipe enabled
- * - LVDS/DVOB/DVOC on
- */
-#define PP_READY (1 << 30)
-#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_ON (1 << 28)
-#define PP_SEQUENCE_OFF (2 << 28)
-#define PP_SEQUENCE_MASK 0x30000000
-#define PP_CONTROL 0x61204
-#define POWER_TARGET_ON (1 << 0)
-
-#define LVDSPP_ON 0x61208
-#define LVDSPP_OFF 0x6120c
-#define PP_CYCLE 0x61210
-
-#define VGACNTRL 0x71400
-
-#define VCLK_DIVISOR_VGA0 0x6000
-#define VCLK_DIVISOR_VGA1 0x6004
-#define VCLK_POST_DIV 0x6010
-
-/* Framebuffer compression */
-#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
-#define FBC_LL_BASE 0x03204 /* 4k page aligned */
-#define FBC_CONTROL 0x03208
-#define FBC_CTL_EN (1<<31)
-#define FBC_CTL_PERIODIC (1<<30)
-#define FBC_CTL_INTERVAL_SHIFT (16)
-#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define FBC_CTL_STRIDE_SHIFT (5)
-#define FBC_CTL_FENCENO (1<<0)
-#define FBC_COMMAND 0x0320c
-#define FBC_CMD_COMPRESS (1<<0)
-#define FBC_STATUS 0x03210
-#define FBC_STAT_COMPRESSING (1<<31)
-#define FBC_STAT_COMPRESSED (1<<30)
-#define FBC_STAT_MODIFIED (1<<29)
-#define FBC_STAT_CURRENT_LINE (1<<0)
-#define FBC_CONTROL2 0x03214
-#define FBC_CTL_FENCE_DBL (0<<4)
-#define FBC_CTL_IDLE_IMM (0<<2)
-#define FBC_CTL_IDLE_FULL (1<<2)
-#define FBC_CTL_IDLE_LINE (2<<2)
-#define FBC_CTL_IDLE_DEBUG (3<<2)
-#define FBC_CTL_CPU_FENCE (1<<1)
-#define FBC_CTL_PLANEA (0<<0)
-#define FBC_CTL_PLANEB (1<<0)
-#define FBC_FENCE_OFF 0x0321b
-
-#define FBC_LL_SIZE (1536)
-#define FBC_LL_PAD (32)
-
-#define DSPARB 0x70030
-
-#define PIPEAFRAMEHIGH 0x70040
-#define PIPEBFRAMEHIGH 0x71040
-#define PIPE_FRAME_HIGH_MASK 0x0000ffff
-#define PIPE_FRAME_HIGH_SHIFT 0
-#define PIPEAFRAMEPIXEL 0x70044
-#define PIPEBFRAMEPIXEL 0x71044
-
-#define PIPE_FRAME_LOW_MASK 0xff000000
-#define PIPE_FRAME_LOW_SHIFT 24
-
-/* Interrupt bits:
- */
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
-#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
-#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
-#define I915_HWB_OOM_INTERRUPT (1<<13) /* binner out of memory */
-#define I915_SYNC_STATUS_INTERRUPT (1<<12)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
-#define I915_DEBUG_INTERRUPT (1<<2)
-#define I915_USER_INTERRUPT (1<<1)
-#define I915_ASLE_INTERRUPT (1<<0)
-
-#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define I915_CRC_ERROR_ENABLE (1UL<<29)
-#define I915_CRC_DONE_ENABLE (1UL<<28)
-#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
-#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define I915_DPST_EVENT_ENABLE (1UL<<23)
-#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define I915_DPST_EVENT_STATUS (1UL<<7)
-#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
-
-/* GM45+ just has to be different */
-#define PIPEA_FRMCOUNT_GM45 0x70040
-#define PIPEA_FLIPCOUNT_GM45 0x70044
-#define PIPEB_FRMCOUNT_GM45 0x71040
-#define PIPEB_FLIPCOUNT_GM45 0x71044
-
-/*
- * Some BIOS scratch area registers. The 845 (and 830?) store the amount
- * of video memory available to the BIOS in SWF1.
- */
-
-#define SWF0 0x71410
-
-/*
- * 855 scratch registers.
- */
-#define SWF10 0x70410
-
-#define SWF30 0x72414
-
-/* IGDNG */
-
-#define CPU_VGACNTRL 0x41000
-
-#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
-#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
-#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
-#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
-#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
-#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
-#define DIGITAL_PORTA_NO_DETECT (0 << 0)
-#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
-#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
-
-/* refresh rate hardware control */
-#define RR_HW_CTL 0x45300
-#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
-#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
-
-#define FDI_PLL_BIOS_0 0x46000
-#define FDI_PLL_BIOS_1 0x46004
-#define FDI_PLL_BIOS_2 0x46008
-#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
-#define DISPLAY_PORT_PLL_BIOS_1 0x46010
-#define DISPLAY_PORT_PLL_BIOS_2 0x46014
-
-#define FDI_PLL_FREQ_CTL 0x46030
-#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
-#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
-#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-
-
-#define PIPEA_DATA_M1 0x60030
-#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
-#define TU_SIZE_MASK 0x7e000000
-#define PIPEA_DATA_M1_OFFSET 0
-#define PIPEA_DATA_N1 0x60034
-#define PIPEA_DATA_N1_OFFSET 0
-
-#define PIPEA_DATA_M2 0x60038
-#define PIPEA_DATA_M2_OFFSET 0
-#define PIPEA_DATA_N2 0x6003c
-#define PIPEA_DATA_N2_OFFSET 0
-
-#define PIPEA_LINK_M1 0x60040
-#define PIPEA_LINK_M1_OFFSET 0
-#define PIPEA_LINK_N1 0x60044
-#define PIPEA_LINK_N1_OFFSET 0
-
-#define PIPEA_LINK_M2 0x60048
-#define PIPEA_LINK_M2_OFFSET 0
-#define PIPEA_LINK_N2 0x6004c
-#define PIPEA_LINK_N2_OFFSET 0
-
-/* PIPEB timing regs are the same, starting from 0x61000 */
-
-#define PIPEB_DATA_M1 0x61030
-#define PIPEB_DATA_M1_OFFSET 0
-#define PIPEB_DATA_N1 0x61034
-#define PIPEB_DATA_N1_OFFSET 0
-
-#define PIPEB_DATA_M2 0x61038
-#define PIPEB_DATA_M2_OFFSET 0
-#define PIPEB_DATA_N2 0x6103c
-#define PIPEB_DATA_N2_OFFSET 0
-
-#define PIPEB_LINK_M1 0x61040
-#define PIPEB_LINK_M1_OFFSET 0
-#define PIPEB_LINK_N1 0x61044
-#define PIPEB_LINK_N1_OFFSET 0
-
-#define PIPEB_LINK_M2 0x61048
-#define PIPEB_LINK_M2_OFFSET 0
-#define PIPEB_LINK_N2 0x6104c
-#define PIPEB_LINK_N2_OFFSET 0
-
-/* CPU panel fitter */
-#define PFA_CTL_1 0x68080
-#define PFB_CTL_1 0x68880
-#define PF_ENABLE (1<<31)
-
-/* legacy palette */
-#define LGC_PALETTE_A 0x4a000
-#define LGC_PALETTE_B 0x4a800
-
-/* interrupts */
-#define DE_MASTER_IRQ_CONTROL (0x80000000)
-#define DE_SPRITEB_FLIP_DONE (1 << 29)
-#define DE_SPRITEA_FLIP_DONE (1 << 28)
-#define DE_PLANEB_FLIP_DONE (1 << 27)
-#define DE_PLANEA_FLIP_DONE (1 << 26)
-#define DE_PCU_EVENT (1 << 25)
-#define DE_GTT_FAULT (1 << 24)
-#define DE_POISON (1 << 23)
-#define DE_PERFORM_COUNTER (1 << 22)
-#define DE_PCH_EVENT (1 << 21)
-#define DE_AUX_CHANNEL_A (1 << 20)
-#define DE_DP_A_HOTPLUG (1 << 19)
-#define DE_GSE (1 << 18)
-#define DE_PIPEB_VBLANK (1 << 15)
-#define DE_PIPEB_EVEN_FIELD (1 << 14)
-#define DE_PIPEB_ODD_FIELD (1 << 13)
-#define DE_PIPEB_LINE_COMPARE (1 << 12)
-#define DE_PIPEB_VSYNC (1 << 11)
-#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
-#define DE_PIPEA_VBLANK (1 << 7)
-#define DE_PIPEA_EVEN_FIELD (1 << 6)
-#define DE_PIPEA_ODD_FIELD (1 << 5)
-#define DE_PIPEA_LINE_COMPARE (1 << 4)
-#define DE_PIPEA_VSYNC (1 << 3)
-#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
-
-#define DEISR 0x44000
-#define DEIMR 0x44004
-#define DEIIR 0x44008
-#define DEIER 0x4400c
-
-/* GT interrupt */
-#define GT_SYNC_STATUS (1 << 2)
-#define GT_USER_INTERRUPT (1 << 0)
-
-#define GTISR 0x44010
-#define GTIMR 0x44014
-#define GTIIR 0x44018
-#define GTIER 0x4401c
-
-/* PCH */
-
-/* south display engine interrupt */
-#define SDE_CRT_HOTPLUG (1 << 11)
-#define SDE_PORTD_HOTPLUG (1 << 10)
-#define SDE_PORTC_HOTPLUG (1 << 9)
-#define SDE_PORTB_HOTPLUG (1 << 8)
-#define SDE_SDVOB_HOTPLUG (1 << 6)
-
-#define SDEISR 0xc4000
-#define SDEIMR 0xc4004
-#define SDEIIR 0xc4008
-#define SDEIER 0xc400c
-
-/* digital port hotplug */
-#define PCH_PORT_HOTPLUG 0xc4030
-#define PORTD_HOTPLUG_ENABLE (1 << 20)
-#define PORTD_PULSE_DURATION_2ms (0)
-#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
-#define PORTD_PULSE_DURATION_6ms (2 << 18)
-#define PORTD_PULSE_DURATION_100ms (3 << 18)
-#define PORTD_HOTPLUG_NO_DETECT (0)
-#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
-#define PORTC_HOTPLUG_ENABLE (1 << 12)
-#define PORTC_PULSE_DURATION_2ms (0)
-#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
-#define PORTC_PULSE_DURATION_6ms (2 << 10)
-#define PORTC_PULSE_DURATION_100ms (3 << 10)
-#define PORTC_HOTPLUG_NO_DETECT (0)
-#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
-#define PORTB_HOTPLUG_ENABLE (1 << 4)
-#define PORTB_PULSE_DURATION_2ms (0)
-#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
-#define PORTB_PULSE_DURATION_6ms (2 << 2)
-#define PORTB_PULSE_DURATION_100ms (3 << 2)
-#define PORTB_HOTPLUG_NO_DETECT (0)
-#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
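
Decoding a PCH hotplug interrupt with these bits is a read of PCH_PORT_HOTPLUG
followed by per-port tests; a long pulse normally means a connect or
disconnect, a short pulse an attention request from the sink. A hedged sketch
for port B (read-only; acknowledgment details are omitted, and dev_priv is
assumed in scope for I915_READ):

/* Illustrative sketch: classify a port B hotplug event. */
uint32_t hotplug = I915_READ(PCH_PORT_HOTPLUG);

if (hotplug & PORTB_HOTPLUG_LONG_DETECT) {
	/* long pulse: a monitor was connected or disconnected */
} else if (hotplug & PORTB_HOTPLUG_SHORT_DETECT) {
	/* short pulse: the sink is requesting attention */
}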
-
-#define PCH_GPIOA 0xc5010
-#define PCH_GPIOB 0xc5014
-#define PCH_GPIOC 0xc5018
-#define PCH_GPIOD 0xc501c
-#define PCH_GPIOE 0xc5020
-#define PCH_GPIOF 0xc5024
-
-#define PCH_DPLL_A 0xc6014
-#define PCH_DPLL_B 0xc6018
-
-#define PCH_FPA0 0xc6040
-#define PCH_FPA1 0xc6044
-#define PCH_FPB0 0xc6048
-#define PCH_FPB1 0xc604c
-
-#define PCH_DPLL_TEST 0xc606c
-
-#define PCH_DREF_CONTROL 0xC6200
-#define DREF_CONTROL_MASK 0x7fc3
-#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
-#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
-#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
-#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
-#define DREF_SSC_SOURCE_DISABLE (0<<11)
-#define DREF_SSC_SOURCE_ENABLE (2<<11)
-#define DREF_SSC_SOURCE_MASK (2<<11)
-#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
-#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
-#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
-#define DREF_NONSPREAD_SOURCE_MASK (2<<9)
-#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
-#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
-#define DREF_SSC4_DOWNSPREAD (0<<6)
-#define DREF_SSC4_CENTERSPREAD (1<<6)
-#define DREF_SSC1_DISABLE (0<<1)
-#define DREF_SSC1_ENABLE (1<<1)
-#define DREF_SSC4_DISABLE (0)
-#define DREF_SSC4_ENABLE (1)
-
-#define PCH_RAWCLK_FREQ 0xc6204
-#define FDL_TP1_TIMER_SHIFT 12
-#define FDL_TP1_TIMER_MASK (3<<12)
-#define FDL_TP2_TIMER_SHIFT 10
-#define FDL_TP2_TIMER_MASK (3<<10)
-#define RAWCLK_FREQ_MASK 0x3ff
-
-#define PCH_DPLL_TMR_CFG 0xc6208
-
-#define PCH_SSC4_PARMS 0xc6210
-#define PCH_SSC4_AUX_PARMS 0xc6214
-
-/* transcoder */
-
-#define TRANS_HTOTAL_A 0xe0000
-#define TRANS_HTOTAL_SHIFT 16
-#define TRANS_HACTIVE_SHIFT 0
-#define TRANS_HBLANK_A 0xe0004
-#define TRANS_HBLANK_END_SHIFT 16
-#define TRANS_HBLANK_START_SHIFT 0
-#define TRANS_HSYNC_A 0xe0008
-#define TRANS_HSYNC_END_SHIFT 16
-#define TRANS_HSYNC_START_SHIFT 0
-#define TRANS_VTOTAL_A 0xe000c
-#define TRANS_VTOTAL_SHIFT 16
-#define TRANS_VACTIVE_SHIFT 0
-#define TRANS_VBLANK_A 0xe0010
-#define TRANS_VBLANK_END_SHIFT 16
-#define TRANS_VBLANK_START_SHIFT 0
-#define TRANS_VSYNC_A 0xe0014
-#define TRANS_VSYNC_END_SHIFT 16
-#define TRANS_VSYNC_START_SHIFT 0
-
-#define TRANSA_DATA_M1 0xe0030
-#define TRANSA_DATA_N1 0xe0034
-#define TRANSA_DATA_M2 0xe0038
-#define TRANSA_DATA_N2 0xe003c
-#define TRANSA_DP_LINK_M1 0xe0040
-#define TRANSA_DP_LINK_N1 0xe0044
-#define TRANSA_DP_LINK_M2 0xe0048
-#define TRANSA_DP_LINK_N2 0xe004c
-
-#define TRANS_HTOTAL_B 0xe1000
-#define TRANS_HBLANK_B 0xe1004
-#define TRANS_HSYNC_B 0xe1008
-#define TRANS_VTOTAL_B 0xe100c
-#define TRANS_VBLANK_B 0xe1010
-#define TRANS_VSYNC_B 0xe1014
-
-#define TRANSB_DATA_M1 0xe1030
-#define TRANSB_DATA_N1 0xe1034
-#define TRANSB_DATA_M2 0xe1038
-#define TRANSB_DATA_N2 0xe103c
-#define TRANSB_DP_LINK_M1 0xe1040
-#define TRANSB_DP_LINK_N1 0xe1044
-#define TRANSB_DP_LINK_M2 0xe1048
-#define TRANSB_DP_LINK_N2 0xe104c
-
-#define TRANSACONF 0xf0008
-#define TRANSBCONF 0xf1008
-#define TRANS_DISABLE (0<<31)
-#define TRANS_ENABLE (1<<31)
-#define TRANS_STATE_MASK (1<<30)
-#define TRANS_STATE_DISABLE (0<<30)
-#define TRANS_STATE_ENABLE (1<<30)
-#define TRANS_FSYNC_DELAY_HB1 (0<<27)
-#define TRANS_FSYNC_DELAY_HB2 (1<<27)
-#define TRANS_FSYNC_DELAY_HB3 (2<<27)
-#define TRANS_FSYNC_DELAY_HB4 (3<<27)
-#define TRANS_DP_AUDIO_ONLY (1<<26)
-#define TRANS_DP_VIDEO_AUDIO (0<<26)
-#define TRANS_PROGRESSIVE (0<<21)
-#define TRANS_8BPC (0<<5)
-#define TRANS_10BPC (1<<5)
-#define TRANS_6BPC (2<<5)
-#define TRANS_12BPC (3<<5)
-
-#define FDI_RXA_CHICKEN 0xc200c
-#define FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
-
-/* CPU: FDI_TX */
-#define FDI_TXA_CTL 0x60100
-#define FDI_TXB_CTL 0x61100
-#define FDI_TX_DISABLE (0<<31)
-#define FDI_TX_ENABLE (1<<31)
-#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
-#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
-#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
-#define FDI_LINK_TRAIN_NONE (3<<28)
-#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
-#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
-#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
-#define FDI_DP_PORT_WIDTH_X1 (0<<19)
-#define FDI_DP_PORT_WIDTH_X2 (1<<19)
-#define FDI_DP_PORT_WIDTH_X3 (2<<19)
-#define FDI_DP_PORT_WIDTH_X4 (3<<19)
-#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
-/* IGDNG: hardwired to 1 */
-#define FDI_TX_PLL_ENABLE (1<<14)
-/* both Tx and Rx */
-#define FDI_SCRAMBLING_ENABLE (0<<7)
-#define FDI_SCRAMBLING_DISABLE (1<<7)
-
-/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
-#define FDI_RXA_CTL 0xf000c
-#define FDI_RXB_CTL 0xf100c
-#define FDI_RX_ENABLE (1<<31)
-#define FDI_RX_DISABLE (0<<31)
-/* train, dp width same as FDI_TX */
-#define FDI_DP_PORT_WIDTH_X8 (7<<19)
-#define FDI_8BPC (0<<16)
-#define FDI_10BPC (1<<16)
-#define FDI_6BPC (2<<16)
-#define FDI_12BPC (3<<16)
-#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
-#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
-#define FDI_RX_PLL_ENABLE (1<<13)
-#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
-#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
-#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
-#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
-#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
-#define FDI_SEL_RAWCLK (0<<4)
-#define FDI_SEL_PCDCLK (1<<4)
-
-#define FDI_RXA_MISC 0xf0010
-#define FDI_RXB_MISC 0xf1010
-#define FDI_RXA_TUSIZE1 0xf0030
-#define FDI_RXA_TUSIZE2 0xf0038
-#define FDI_RXB_TUSIZE1 0xf1030
-#define FDI_RXB_TUSIZE2 0xf1038
-
-/* FDI_RX interrupt register format */
-#define FDI_RX_INTER_LANE_ALIGN (1<<10)
-#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
-#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
-#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
-#define FDI_RX_FS_CODE_ERR (1<<6)
-#define FDI_RX_FE_CODE_ERR (1<<5)
-#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
-#define FDI_RX_HDCP_LINK_FAIL (1<<3)
-#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
-#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
-#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
-
-#define FDI_RXA_IIR 0xf0014
-#define FDI_RXA_IMR 0xf0018
-#define FDI_RXB_IIR 0xf1014
-#define FDI_RXB_IMR 0xf1018
-
-#define FDI_PLL_CTL_1 0xfe000
-#define FDI_PLL_CTL_2 0xfe004
-
-/* CRT */
-#define PCH_ADPA 0xe1100
-#define ADPA_TRANS_SELECT_MASK (1<<30)
-#define ADPA_TRANS_A_SELECT 0
-#define ADPA_TRANS_B_SELECT (1<<30)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-
-/* or SDVOB */
-#define HDMIB 0xe1140
-#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define COLOR_FORMAT_8bpc (0)
-#define COLOR_FORMAT_12bpc (3 << 26)
-#define SDVOB_HOTPLUG_ENABLE (1 << 23)
-#define SDVO_ENCODING (0)
-#define TMDS_ENCODING (2 << 10)
-#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
-#define SDVOB_BORDER_ENABLE (1 << 7)
-#define AUDIO_ENABLE (1 << 6)
-#define VSYNC_ACTIVE_HIGH (1 << 4)
-#define HSYNC_ACTIVE_HIGH (1 << 3)
-#define PORT_DETECTED (1 << 2)
-
-#define HDMIC 0xe1150
-#define HDMID 0xe1160
-
-#define PCH_LVDS 0xe1180
-#define LVDS_DETECTED (1 << 1)
-
-#define BLC_PWM_CPU_CTL2 0x48250
-#define PWM_ENABLE (1 << 31)
-#define PWM_PIPE_A (0 << 29)
-#define PWM_PIPE_B (1 << 29)
-#define BLC_PWM_CPU_CTL 0x48254
-
-#define BLC_PWM_PCH_CTL1 0xc8250
-#define PWM_PCH_ENABLE (1 << 31)
-#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
-#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
-#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
-#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
-
-#define BLC_PWM_PCH_CTL2 0xc8254
-
-#define PCH_PP_STATUS 0xc7200
-#define PCH_PP_CONTROL 0xc7204
-#define EDP_FORCE_VDD (1 << 3)
-#define EDP_BLC_ENABLE (1 << 2)
-#define PANEL_POWER_RESET (1 << 1)
-#define PANEL_POWER_OFF (0 << 0)
-#define PANEL_POWER_ON (1 << 0)
-#define PCH_PP_ON_DELAYS 0xc7208
-#define EDP_PANEL (1 << 30)
-#define PCH_PP_OFF_DELAYS 0xc720c
-#define PCH_PP_DIVISOR 0xc7210
-
-#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
-#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
-#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
-#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
-#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
-#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
-#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
-#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
-#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
-#define PCI_DEVICE_ID_INTEL_82946_GZ 0x2972
-#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
-#define PCI_DEVICE_ID_INTEL_82Q963_IG 0x2992
-#define PCI_DEVICE_ID_INTEL_82G965_IG 0x29a2
-#define PCI_DEVICE_ID_INTEL_GM965_IG 0x2a02
-#define PCI_DEVICE_ID_INTEL_GME965_IG 0x2a12
-#define PCI_DEVICE_ID_INTEL_82G33_IG 0x29c2
-#define PCI_DEVICE_ID_INTEL_82Q35_IG 0x29b2
-#define PCI_DEVICE_ID_INTEL_82Q33_IG 0x29d2
-#define PCI_DEVICE_ID_INTEL_CANTIGA_IG 0x2a42
-#define PCI_DEVICE_ID_INTEL_EL_IG 0x2e02
-#define PCI_DEVICE_ID_INTEL_82Q45_IG 0x2e12
-#define PCI_DEVICE_ID_INTEL_82G45_IG 0x2e22
-#define PCI_DEVICE_ID_INTEL_82G41_IG 0x2e32
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x42
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x46
-#define PCI_DEVICE_ID_INTEL_82B43_IG 0x2e42
-
-
-#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
-#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
-#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
-#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
-#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
-
-#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
-#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
-#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
-#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GME_IG)
-
-#define IS_IGDNG_D(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_IGDNG_D_IG)
-#define IS_IGDNG_M(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_IGDNG_M_IG)
-#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
-
-#define IS_I965G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82946_GZ || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G35_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q963_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G965_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_GM965_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_GME965_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_CANTIGA_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_EL_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q45_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G45_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82B43_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_IGDNG_D_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_IGDNG_M_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G41_IG)
-
-#define IS_I965GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_GM965_IG)
-
-#define IS_GM45(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_CANTIGA_IG)
-
-#define IS_G4X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_EL_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q45_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G45_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82B43_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82G41_IG)
-
-#define IS_G33(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82G33_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q35_IG || \
- (dev)->pci_device == PCI_DEVICE_ID_INTEL_82Q33_IG)
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
- IS_IGDNG(dev))
-
-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_IGDNG_M(dev))
-
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
-
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
- IS_IGDNG(dev))
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-
-#endif /* _I915_DRV_H */
diff --git a/usr/src/uts/intel/io/drm/i915_gem.c b/usr/src/uts/intel/io/drm/i915_gem.c
deleted file mode 100644
index 5570c2d..0000000
--- a/usr/src/uts/intel/io/drm/i915_gem.c
+++ /dev/null
@@ -1,2919 +0,0 @@
-/* BEGIN CSTYLED */
-
-/*
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <sys/x86_archext.h>
-#include <sys/vfs_opreg.h>
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#ifndef roundup
-#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
-#endif /* !roundup */
-
-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
-
-static timeout_id_t worktimer_id = NULL;
-
-extern int drm_mm_init(struct drm_mm *mm,
- unsigned long start, unsigned long size);
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-extern int choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
- int vacalign, uint_t flags);
-
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
- int write);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-
-static void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-
-/*ARGSUSED*/
-int
-i915_gem_init_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_init args;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_init *) data, sizeof(args));
-
- spin_lock(&dev->struct_mutex);
-
- if ((args.gtt_start >= args.gtt_end) ||
- ((args.gtt_start & (PAGE_SIZE - 1)) != 0) ||
- ((args.gtt_end & (PAGE_SIZE - 1)) != 0)) {
- spin_unlock(&dev->struct_mutex);
- DRM_ERROR("i915_gem_init_ioctel invalid arg 0x%lx args.start 0x%lx end 0x%lx", &args, args.gtt_start, args.gtt_end);
- return EINVAL;
- }
-
- dev->gtt_total = (uint32_t) (args.gtt_end - args.gtt_start);
-
- (void) drm_mm_init(&dev_priv->mm.gtt_space,
- (unsigned long) args.gtt_start, dev->gtt_total);
- DRM_DEBUG("i915_gem_init_ioctl dev->gtt_total %x, dev_priv->mm.gtt_space 0x%x gtt_start 0x%lx", dev->gtt_total, dev_priv->mm.gtt_space, args.gtt_start);
- ASSERT(dev->gtt_total != 0);
-
- spin_unlock(&dev->struct_mutex);
-
-
- return 0;
-}
-
-/*ARGSUSED*/
-int
-i915_gem_get_aperture_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_get_aperture args;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- args.aper_size = (uint64_t)dev->gtt_total;
- args.aper_available_size = (args.aper_size -
- atomic_read(&dev->pin_memory));
-
- ret = DRM_COPY_TO_USER((struct drm_i915_gem_get_aperture __user *) data, &args, sizeof(args));
-
- if (ret != 0)
- DRM_ERROR("i915_gem_get_aperture_ioctl error! %d", ret);
-
- DRM_DEBUG("i915_gem_get_aperture_ioctl called sizeof %lu, aper_size 0x%x, aper_available_size 0x%lx\n", sizeof(args), dev->gtt_total, args.aper_available_size);
-
- return 0;
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- */
-/*ARGSUSED*/
-int
-i915_gem_create_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_create args;
- struct drm_gem_object *obj;
- int handlep;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_create *) data, sizeof(args));
-
-
- args.size = (uint64_t) roundup(args.size, PAGE_SIZE);
-
- if (args.size == 0) {
- DRM_ERROR("Invalid obj size %d", args.size);
- return EINVAL;
- }
- /* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args.size);
- if (obj == NULL) {
- DRM_ERROR("Failed to alloc obj");
- return ENOMEM;
- }
-
- ret = drm_gem_handle_create(fpriv, obj, &handlep);
- spin_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- if (ret)
- return ret;
-
- args.handle = handlep;
-
- ret = DRM_COPY_TO_USER((struct drm_i915_gem_create *) data, &args, sizeof(args));
-
- if (ret != 0)
- DRM_ERROR("gem create error! %d", ret);
-
- DRM_DEBUG("i915_gem_create_ioctl object name %d, size 0x%lx, list 0x%lx, obj 0x%lx",handlep, args.size, &fpriv->object_idr, obj);
-
- return 0;
-}
-
-/**
- * Reads data from the object referenced by handle.
- *
- * On error, the contents of *data are undefined.
- */
-/*ARGSUSED*/
-int
-i915_gem_pread_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_pread args;
- struct drm_gem_object *obj;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_pread __user *) data, sizeof(args));
-
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL)
- return EBADF;
-
- /* Bounds check source.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args.offset > obj->size || args.size > obj->size ||
- args.offset + args.size > obj->size) {
- drm_gem_object_unreference(obj);
- DRM_ERROR("i915_gem_pread_ioctl invalid args");
- return EINVAL;
- }
-
- spin_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj, args.offset, args.size);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- DRM_ERROR("pread failed to read domain range ret %d!!!", ret);
- return EFAULT;
- }
-
- unsigned long unwritten = 0;
- uint32_t *user_data;
- user_data = (uint32_t *) (uintptr_t) args.data_ptr;
-
- unwritten = DRM_COPY_TO_USER(user_data, obj->kaddr + args.offset, args.size);
- if (unwritten) {
- ret = EFAULT;
- DRM_ERROR("i915_gem_pread error!!! unwritten %d", unwritten);
- }
-
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/*ARGSUSED*/
-static int
-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
-{
- uint32_t *user_data;
- int ret = 0;
- unsigned long unwritten = 0;
-
- user_data = (uint32_t *) (uintptr_t) args->data_ptr;
- spin_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
- if (ret) {
- spin_unlock(&dev->struct_mutex);
- DRM_ERROR("i915_gem_gtt_pwrite failed to pin ret %d", ret);
- return ret;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret)
- goto err;
-
- DRM_DEBUG("obj %d write domain 0x%x read domain 0x%x", obj->name, obj->write_domain, obj->read_domains);
-
- unwritten = DRM_COPY_FROM_USER(obj->kaddr + args->offset, user_data, args->size);
- if (unwritten) {
- ret = EFAULT;
- DRM_ERROR("i915_gem_gtt_pwrite error!!! unwritten %d", unwritten);
- goto err;
- }
-
-err:
- i915_gem_object_unpin(obj);
- spin_unlock(&dev->struct_mutex);
- if (ret)
- DRM_ERROR("i915_gem_gtt_pwrite error %d", ret);
- return ret;
-}
-
-/*ARGSUSED*/
-int
-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
-{
- DRM_ERROR(" i915_gem_shmem_pwrite Not support");
- return -1;
-}
-
-/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
-/*ARGSUSED*/
-int
-i915_gem_pwrite_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_pwrite args;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- ret = DRM_COPY_FROM_USER(&args,
- (struct drm_i915_gem_pwrite __user *) data, sizeof(args));
- if (ret)
- DRM_ERROR("i915_gem_pwrite_ioctl failed to copy from user");
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL)
- return EBADF;
- obj_priv = obj->driver_private;
- DRM_DEBUG("i915_gem_pwrite_ioctl, obj->name %d",obj->name);
-
- /* Bounds check destination.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args.offset > obj->size || args.size > obj->size ||
- args.offset + args.size > obj->size) {
- drm_gem_object_unreference(obj);
- DRM_ERROR("i915_gem_pwrite_ioctl invalid arg");
- return EINVAL;
- }
-
- /* We can only do the GTT pwrite on untiled buffers, as otherwise
- * it would end up going through the fenced access, and we'll get
- * different detiling behavior between reading and writing.
- * pread/pwrite currently are reading and writing from the CPU
- * perspective, requiring manual detiling by the client.
- */
- if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0)
- ret = i915_gem_gtt_pwrite(dev, obj, &args, fpriv);
- else
- ret = i915_gem_shmem_pwrite(dev, obj, &args, fpriv);
-
- if (ret)
- DRM_ERROR("pwrite failed %d\n", ret);
-
- drm_gem_object_unreference(obj);
-
- return ret;
-}
-
-/**
- * Called when user space prepares to use an object with the CPU, either
- * through the mmap ioctl's mapping or a GTT mapping.
- */
-/*ARGSUSED*/
-int
-i915_gem_set_domain_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_set_domain args;
- struct drm_gem_object *obj;
- int ret = 0;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_set_domain __user *) data, sizeof(args));
-
- uint32_t read_domains = args.read_domains;
- uint32_t write_domain = args.write_domain;
-
- /* Only handle setting domains to types used by the CPU. */
- if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
- ret = EINVAL;
-
- if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
- ret = EINVAL;
-
- /* Having something in the write domain implies it's in the read
- * domain, and only that read domain. Enforce that in the request.
- */
- if (write_domain != 0 && read_domains != write_domain)
- ret = EINVAL;
- if (ret) {
- DRM_ERROR("set_domain invalid read or write");
- return EINVAL;
- }
-
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL)
- return EBADF;
-
- spin_lock(&dev->struct_mutex);
- DRM_DEBUG("set_domain_ioctl %p(name %d size 0x%x), %08x %08x\n",
- obj, obj->name, obj->size, args.read_domains, args.write_domain);
-
- if (read_domains & I915_GEM_DOMAIN_GTT) {
- ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
-
- /* Silently promote "you're not bound, there was nothing to do"
- * to success, since the client was just asking us to
- * make sure everything was done.
- */
- if (ret == EINVAL)
- ret = 0;
- } else {
- ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
- }
-
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- if (ret)
- DRM_ERROR("i915_set_domain_ioctl ret %d", ret);
- return ret;
-}
-
-/**
- * Called when user space has done writes to this buffer
- */
-/*ARGSUSED*/
-int
-i915_gem_sw_finish_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_sw_finish args;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_sw_finish __user *) data, sizeof(args));
-
- spin_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL) {
- spin_unlock(&dev->struct_mutex);
- return EBADF;
- }
-
- DRM_DEBUG("%s: sw_finish %d (%p name %d size 0x%x)\n",
- __func__, args.handle, obj, obj->name, obj->size);
-
- obj_priv = obj->driver_private;
- /* Pinned buffers may be scanout, so flush the cache */
- if (obj_priv->pin_count) {
- i915_gem_object_flush_cpu_write_domain(obj);
- }
-
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
- * Maps the contents of an object, returning the address it is mapped
- * into.
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- */
-/*ARGSUSED*/
-int
-i915_gem_mmap_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_mmap args;
- struct drm_gem_object *obj;
- caddr_t vvaddr = NULL;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(
- &args, (struct drm_i915_gem_mmap __user *)data,
- sizeof (struct drm_i915_gem_mmap));
-
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL)
- return EBADF;
-
- ret = ddi_devmap_segmap(fpriv->dev, (off_t)obj->map->handle,
- ttoproc(curthread)->p_as, &vvaddr, obj->map->size,
- PROT_ALL, PROT_ALL, MAP_SHARED, fpriv->credp);
- if (ret)
- return ret;
-
- spin_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
-
- args.addr_ptr = (uint64_t)(uintptr_t)vvaddr;
-
- DRM_COPYTO_WITH_RETURN(
- (struct drm_i915_gem_mmap __user *)data,
- &args, sizeof (struct drm_i915_gem_mmap));
-
- return 0;
-}
-
-static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- if (obj_priv->page_list == NULL)
- return;
-
- kmem_free(obj_priv->page_list,
- btop(obj->size) * sizeof(caddr_t));
-
- obj_priv->page_list = NULL;
-}
-
-static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- /* Add a reference if we're newly entering the active list. */
- if (!obj_priv->active) {
- drm_gem_object_reference(obj);
- obj_priv->active = 1;
- }
- /* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.active_list, (caddr_t)obj_priv);
- obj_priv->last_rendering_seqno = seqno;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list, (caddr_t)obj_priv);
- obj_priv->last_rendering_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (obj_priv->pin_count != 0) {
- list_del_init(&obj_priv->list);
- } else {
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list, (caddr_t)obj_priv);
- }
- obj_priv->last_rendering_seqno = 0;
- if (obj_priv->active) {
- obj_priv->active = 0;
- drm_gem_object_unreference(obj);
- }
-}
-
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static uint32_t
-i915_add_request(struct drm_device *dev, uint32_t flush_domains)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_request *request;
- uint32_t seqno;
- int was_empty;
- RING_LOCALS;
-
- request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
- if (request == NULL) {
- DRM_ERROR("Failed to alloc request");
- return 0;
- }
- /* Grab the seqno we're going to make this request be, and bump the
- * next (skipping 0 so it can be the reserved no-seqno value).
- */
- seqno = dev_priv->mm.next_gem_seqno;
- dev_priv->mm.next_gem_seqno++;
- if (dev_priv->mm.next_gem_seqno == 0)
- dev_priv->mm.next_gem_seqno++;
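-	/*
-	 * Worked example of the skip-zero bump: if next_gem_seqno was
-	 * 0xffffffff, this request gets seqno 0xffffffff, the first
-	 * increment wraps the counter to 0, and the second moves it to
-	 * 1, so 0 stays reserved as the no-seqno value.
-	 */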
-
-	DRM_DEBUG("add_request seqno = %u dev %p", seqno, (void *)dev);
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(0);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- request->seqno = seqno;
- request->emitted_jiffies = jiffies;
- was_empty = list_empty(&dev_priv->mm.request_list);
- list_add_tail(&request->list, &dev_priv->mm.request_list, (caddr_t)request);
-
- /* Associate any objects on the flushing list matching the write
- * domain we're flushing with our flush.
- */
-	if (flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-
-		/* Walk the flushing list safely: entries may move to
-		 * the active list as we go.
-		 */
-		obj_priv = list_entry(dev_priv->mm.flushing_list.next,
-		    struct drm_i915_gem_object, list);
-		next = list_entry(obj_priv->list.next,
-		    struct drm_i915_gem_object, list);
-		for (; &obj_priv->list != &dev_priv->mm.flushing_list;
-		    obj_priv = next, next = list_entry(next->list.next,
-		    struct drm_i915_gem_object, list)) {
-			struct drm_gem_object *obj = obj_priv->obj;
-
-			if ((obj->write_domain & flush_domains) ==
-			    obj->write_domain) {
-				obj->write_domain = 0;
-				i915_gem_object_move_to_active(obj, seqno);
-			}
-		}
-	}
-
-	if (was_empty && !dev_priv->mm.suspended) {
-		/*
-		 * Schedule the retire handler DRM_HZ ticks from now;
-		 * this port uses timeout(9F) rather than a Linux
-		 * delayed workqueue.
-		 */
-		worktimer_id = timeout(i915_gem_retire_work_handler,
-		    (void *)dev, DRM_HZ);
-		DRM_DEBUG("i915_gem: schedule_delayed_work");
-	}
- return seqno;
-}
-
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-uint32_t
-i915_retire_commands(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- uint32_t flush_domains = 0;
- RING_LOCALS;
-
- /* The sampler always gets flushed on i965 (sigh) */
- if (IS_I965G(dev))
- flush_domains |= I915_GEM_DOMAIN_SAMPLER;
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
-
- return flush_domains;
-}
-
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
- struct drm_i915_gem_request *request)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate.
- */
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = list_entry(dev_priv->mm.active_list.next,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
-
- /* If the seqno being retired doesn't match the oldest in the
- * list, then the oldest in the list must still be newer than
- * this seqno.
- */
- if (obj_priv->last_rendering_seqno != request->seqno)
- return;
-
- DRM_DEBUG("%s: retire %d moves to inactive list %p\n",
- __func__, request->seqno, obj);
-
- if (obj->write_domain != 0) {
- i915_gem_object_move_to_flushing(obj);
- } else {
- i915_gem_object_move_to_inactive(obj);
- }
- }
-}
-
-/**
- * Returns true if seq1 is at or after seq2; the signed 32-bit
- * difference makes the comparison safe across seqno wraparound.
- */
-static int
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
- return (int32_t)(seq1 - seq2) >= 0;
-}
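-
-/*
- * Worked example for the signed compare above: seq1 = 0x00000002 and
- * seq2 = 0xfffffffd give seq1 - seq2 = 0x00000005, which is >= 0 as an
- * int32_t, so seq1 counts as "after" seq2 despite being numerically
- * smaller. The comparison only breaks down once two live seqnos drift
- * more than 2^31 apart.
- */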
-
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
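-	/*
-	 * This reads back the value the ring wrote to the status page
-	 * with the MI_STORE_DWORD_INDEX emitted in i915_add_request().
-	 */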
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
-}
-
-/**
- * This function clears the request list as sequence numbers are passed.
- */
-void
-i915_gem_retire_requests(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno;
-
- seqno = i915_get_gem_seqno(dev);
-
- while (!list_empty(&dev_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
- uint32_t retiring_seqno;
-		request = (struct drm_i915_gem_request *)(uintptr_t)
-		    (dev_priv->mm.request_list.next->contain_ptr);
- retiring_seqno = request->seqno;
-
- if (i915_seqno_passed(seqno, retiring_seqno) ||
- dev_priv->mm.wedged) {
- i915_gem_retire_request(dev, request);
-
- list_del(&request->list);
- drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
-		} else {
-			break;
-		}
- }
-}
-
-void
-i915_gem_retire_work_handler(void *device)
-{
- struct drm_device *dev = (struct drm_device *)device;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- spin_lock(&dev->struct_mutex);
-
-	/* The retire timer was cancelled; GEM is idle, nothing to do. */
- if (worktimer_id == NULL) {
- spin_unlock(&dev->struct_mutex);
- return;
- }
-
- i915_gem_retire_requests(dev);
-	if (!dev_priv->mm.suspended &&
-	    !list_empty(&dev_priv->mm.request_list)) {
-		DRM_DEBUG("i915_gem: schedule_delayed_work");
-		worktimer_id = timeout(i915_gem_retire_work_handler,
-		    (void *)dev, DRM_HZ);
-	}
- spin_unlock(&dev->struct_mutex);
-}
-
-/**
- * i965_reset - reset chip after a hang
- * @dev: drm device to reset
- * @flags: reset domains
- *
- * Reset the chip. Useful if a hang is detected.
- *
- * Procedure is fairly simple:
- * - reset the chip using the reset reg
- * - re-init context state
- * - re-init hardware status page
- * - re-init ring buffer
- * - re-init interrupt state
- * - re-init display
- */
-void i965_reset(struct drm_device *dev, u8 flags)
-{
- ddi_acc_handle_t conf_hdl;
- drm_i915_private_t *dev_priv = dev->dev_private;
-	int retry = 0;
- uint8_t gdrst;
-
- if (flags & GDRST_FULL)
- i915_save_display(dev);
-
- if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
-		DRM_ERROR("i965_reset: pci_config_setup failed");
- return;
- }
-
- /*
- * Set the reset bit, wait for reset, then clear it. Hardware
- * will clear the status bit (bit 1) when it's actually ready
- * for action again.
- */
-	gdrst = pci_config_get8(conf_hdl, GDRST);
-	pci_config_put8(conf_hdl, GDRST, gdrst | flags);
-	drv_usecwait(50);
-	pci_config_put8(conf_hdl, GDRST, gdrst & 0xfe);
-
-	/* ...we don't want to loop forever though; give up after ~500us */
-	do {
-		drv_usecwait(100);
-		gdrst = pci_config_get8(conf_hdl, GDRST);
-	} while ((gdrst & 2) && (retry++ < 5));
-
-	/* Done with config space; release the access handle. */
-	pci_config_teardown(&conf_hdl);
-
-	/* Ok now get things going again... */
-
- /*
- * Everything depends on having the GTT running, so we need to start
- * there. Fortunately we don't need to do this unless we reset the
- * chip at a PCI level.
- *
- * Next we need to restore the context, but we don't use those
- * yet either...
- *
- * Ring buffer needs to be re-initialized in the KMS case, or if X
- * was running at the time of the reset (i.e. we weren't VT
- * switched away).
- */
- if (!dev_priv->mm.suspended) {
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
- struct drm_gem_object *obj = ring->ring_obj;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- dev_priv->mm.suspended = 0;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_HEAD, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
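-		/*
-		 * Worked example, assuming the 128 KB ring allocated in
-		 * i915_gem_init_ringbuffer(): obj->size - 4096 = 0x1f000,
-		 * which is (32 pages - 1) << 12 in the RING_NR_PAGES
-		 * field, OR'ed with RING_VALID to start the ring.
-		 */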
- i915_kernel_lost_context(dev);
-
- (void) drm_irq_install(dev);
- }
-
- /*
- * Display needs restore too...
- */
- if (flags & GDRST_FULL)
- i915_restore_display(dev);
-}
-
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 ier;
- int ret = 0;
-
- ASSERT(seqno != 0);
-
- if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IGDNG(dev))
- ier = I915_READ(DEIER) | I915_READ(GTIER);
- else
- ier = I915_READ(IER);
- if (!ier) {
- DRM_ERROR("something (likely vbetool) disabled "
- "interrupts, re-enabling\n");
- (void) i915_driver_irq_preinstall(dev);
- i915_driver_irq_postinstall(dev);
- }
-
- dev_priv->mm.waiting_gem_seqno = seqno;
- i915_user_irq_on(dev);
- DRM_WAIT(ret, &dev_priv->irq_queue,
- (i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
- dev_priv->mm.wedged));
- i915_user_irq_off(dev);
- dev_priv->mm.waiting_gem_seqno = 0;
- }
- if (dev_priv->mm.wedged) {
- ret = EIO;
- }
-
-	/* The GPU may have hung; attempt a reset if we can. */
-	if (ret == -2 && (seqno > i915_get_gem_seqno(dev))) {
-		if (IS_I965G(dev)) {
-			DRM_ERROR("GPU hang detected, trying reset: "
-			    "waited for seqno %d, now at seqno %d",
-			    seqno, i915_get_gem_seqno(dev));
-			dev_priv->mm.wedged = 1;
-			i965_reset(dev, GDRST_RENDER);
-			i915_gem_retire_requests(dev);
-			dev_priv->mm.wedged = 0;
-		} else {
-			DRM_ERROR("GPU hang detected; reboot required");
-		}
-		return 0;
-	}
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0)
- i915_gem_retire_requests(dev);
-
- return ret;
-}
-
-static void
-i915_gem_flush(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd;
- RING_LOCALS;
-
- DRM_DEBUG("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
-
- if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) {
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
- DRM_DEBUG("%s: queue flush %08x to ring\n", __func__, cmd);
-
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
- }
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- */
-static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret, seqno;
-
- /* This function only exists to support waiting for existing rendering,
- * not for emitting required flushes.
- */
-
-	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0) {
-		DRM_ERROR("object %p still has GPU write domain %08x",
-		    (void *)obj, obj->write_domain);
-		return 0;
-	}
-
- /* If there is rendering queued on the buffer being evicted, wait for
- * it.
- */
- if (obj_priv->active) {
- DRM_DEBUG("%s: object %d %p wait for seqno %08x\n",
- __func__, obj->name, obj, obj_priv->last_rendering_seqno);
-
- seqno = obj_priv->last_rendering_seqno;
- if (seqno == 0) {
-			DRM_DEBUG("last rendering may already have finished");
- return 0;
- }
- ret = i915_wait_request(dev, seqno);
- if (ret != 0) {
-			DRM_ERROR("%s: i915_wait_request seqno %d now %d\n",
-			    __func__, seqno, i915_get_gem_seqno(dev));
- return ret;
- }
- }
-
- return 0;
-}
-
-/**
- * Unbinds an object from the GTT aperture.
- */
-int
-i915_gem_object_unbind(struct drm_gem_object *obj, uint32_t type)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret = 0;
-
- if (obj_priv->gtt_space == NULL)
- return 0;
-
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Attempting to unbind pinned buffer\n");
- return EINVAL;
- }
-
- /* Wait for any rendering to complete
- */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret) {
- DRM_ERROR("wait_rendering failed: %d\n", ret);
- return ret;
- }
-
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it. This will
- * also ensure that all pending GPU writes are finished
- * before we unbind.
- */
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret) {
- DRM_ERROR("set_domain failed: %d\n", ret);
- return ret;
- }
-
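-	/*
-	 * Note the inverted convention in this port: agp_mem is 0 while
-	 * the pages are bound into the GTT (see the return value check
-	 * in i915_gem_object_bind_to_gtt()) and -1 once unbound.
-	 */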
- if (!obj_priv->agp_mem) {
- (void) drm_agp_unbind_pages(dev, obj->size / PAGE_SIZE,
- obj_priv->gtt_offset, type);
- obj_priv->agp_mem = -1;
- }
-
- ASSERT(!obj_priv->active);
-
- i915_gem_object_free_page_list(obj);
-
- if (obj_priv->gtt_space) {
- atomic_dec(&dev->gtt_count);
- atomic_sub(obj->size, &dev->gtt_memory);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- }
-
- /* Remove ourselves from the LRU list if present. */
- if (!list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
-
- return 0;
-}
-
-static int
-i915_gem_evict_something(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
-
- for (;;) {
- /* If there's an inactive buffer available now, grab it
- * and be done.
- */
- if (!list_empty(&dev_priv->mm.inactive_list)) {
- obj_priv = list_entry(dev_priv->mm.inactive_list.next,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
-			ASSERT(obj_priv->pin_count == 0);
-			DRM_DEBUG("%s: evicting %d\n", __func__, obj->name);
-			ASSERT(!obj_priv->active);
- /* Wait on the rendering and unbind the buffer. */
- ret = i915_gem_object_unbind(obj, 1);
- break;
- }
- /* If we didn't get anything, but the ring is still processing
- * things, wait for one of those things to finish and hopefully
- * leave us a buffer to evict.
- */
- if (!list_empty(&dev_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_entry(dev_priv->mm.request_list.next,
- struct drm_i915_gem_request,
- list);
-
- ret = i915_wait_request(dev, request->seqno);
- if (ret) {
- break;
- }
- /* if waiting caused an object to become inactive,
- * then loop around and wait for it. Otherwise, we
- * assume that waiting freed and unbound something,
- * so there should now be some space in the GTT
- */
- if (!list_empty(&dev_priv->mm.inactive_list))
- continue;
- break;
- }
-
- /* If we didn't have anything on the request list but there
- * are buffers awaiting a flush, emit one and try again.
- * When we wait on it, those buffers waiting for that flush
- * will get moved to inactive.
- */
- if (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_entry(dev_priv->mm.flushing_list.next,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
-
- i915_gem_flush(dev,
- obj->write_domain,
- obj->write_domain);
- (void) i915_add_request(dev, obj->write_domain);
-
- obj = NULL;
- continue;
- }
-
- DRM_ERROR("inactive empty %d request empty %d "
- "flushing empty %d\n",
- list_empty(&dev_priv->mm.inactive_list),
- list_empty(&dev_priv->mm.request_list),
- list_empty(&dev_priv->mm.flushing_list));
- /* If we didn't do any of the above, there's nothing to be done
- * and we just can't fit it in.
- */
- return ENOMEM;
- }
- return ret;
-}
-
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
- int ret;
-
- for (;;) {
- ret = i915_gem_evict_something(dev);
- if (ret != 0)
- break;
- }
-	/* Running out of things to evict is the expected way out. */
-	if (ret == ENOMEM)
-		return 0;
-	DRM_ERROR("evict_everything ret %d", ret);
-	return ret;
-}
-
-/**
- * Finds free space in the GTT aperture and binds the object there.
- */
-static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, uint32_t alignment)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct drm_mm_node *free_space;
- int page_count, ret;
-
- if (dev_priv->mm.suspended)
- return EBUSY;
- if (alignment == 0)
- alignment = PAGE_SIZE;
- if (alignment & (PAGE_SIZE - 1)) {
- DRM_ERROR("Invalid object alignment requested %u\n", alignment);
- return EINVAL;
- }
-
- if (obj_priv->gtt_space) {
-		DRM_ERROR("object already bound to the GTT");
- return 0;
- }
-search_free:
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- (unsigned long) obj->size, alignment, 0);
- if (free_space != NULL) {
-		obj_priv->gtt_space = drm_mm_get_block(free_space,
-		    (unsigned long) obj->size, alignment);
- if (obj_priv->gtt_space != NULL) {
- obj_priv->gtt_space->private = obj;
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- }
- }
- if (obj_priv->gtt_space == NULL) {
- /* If the gtt is empty and we're still having trouble
- * fitting our object in, we're out of memory.
- */
- if (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list)) {
- DRM_ERROR("GTT full, but LRU list empty\n");
- return ENOMEM;
- }
-
- ret = i915_gem_evict_something(dev);
- if (ret != 0) {
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
- return ret;
- }
- goto search_free;
- }
-
- ret = i915_gem_object_get_page_list(obj);
- if (ret) {
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- DRM_ERROR("bind to gtt failed to get page list");
- return ret;
- }
-
- page_count = obj->size / PAGE_SIZE;
- /* Create an AGP memory structure pointing at our pages, and bind it
- * into the GTT.
- */
-	DRM_DEBUG("Binding object %d of page_count %d at gtt_offset 0x%x "
-	    "obj->pfnarray = 0x%lx",
-	    obj->name, page_count, obj_priv->gtt_offset, obj->pfnarray);
-
- obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj->pfnarray,
- page_count,
- obj_priv->gtt_offset);
- if (obj_priv->agp_mem) {
- i915_gem_object_free_page_list(obj);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
-		DRM_ERROR("Failed to bind pages for obj %d (%p)",
-		    obj->name, (void *)obj);
- return ENOMEM;
- }
- atomic_inc(&dev->gtt_count);
- atomic_add(obj->size, &dev->gtt_memory);
-
- /* Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- ASSERT(!(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)));
- ASSERT(!(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)));
-
- return 0;
-}
-
-void
-i915_gem_clflush_object(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- /* If we don't have a page list set up, then we're not pinned
- * to GPU, and we can ignore the cache flush because it'll happen
- * again at bind time.
- */
-
- if (obj_priv->page_list == NULL)
- return;
- drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
-}
-
-/** Flushes any GPU write domain for the object if it's dirty. */
-static void
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- uint32_t seqno;
-
- if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return;
-
- /* Queue the GPU write cache flushing we need. */
- i915_gem_flush(dev, 0, obj->write_domain);
- seqno = i915_add_request(dev, obj->write_domain);
- DRM_DEBUG("flush_gpu_write_domain seqno = %d", seqno);
- obj->write_domain = 0;
- i915_gem_object_move_to_active(obj, seqno);
-}
-
-/** Flushes the GTT write domain for the object if it's dirty. */
-static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
-{
- if (obj->write_domain != I915_GEM_DOMAIN_GTT)
- return;
-
- /* No actual flushing is required for the GTT write domain. Writes
- * to it immediately go to main memory as far as we know, so there's
- * no chipset flush. It also doesn't land in render cache.
- */
- obj->write_domain = 0;
-}
-
-/** Flushes the CPU write domain for the object if it's dirty. */
-static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
-
- if (obj->write_domain != I915_GEM_DOMAIN_CPU)
- return;
-
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
-}
-
-/**
- * Moves a single object to the GTT read, and possibly write domain.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-static int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret;
-
- /* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
- return EINVAL;
-
- i915_gem_object_flush_gpu_write_domain(obj);
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0) {
- DRM_ERROR("set_to_gtt_domain wait_rendering ret %d", ret);
- return ret;
- }
- /* If we're writing through the GTT domain, then CPU and GPU caches
- * will need to be invalidated at next use.
- */
- if (write)
- obj->read_domains &= I915_GEM_DOMAIN_GTT;
- i915_gem_object_flush_cpu_write_domain(obj);
-
-	DRM_DEBUG("i915_gem_object_set_to_gtt_domain read_domains %08x",
-	    obj->read_domains);
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
-	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_GTT) == 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
- if (write) {
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
- }
-
- return 0;
-}
-
-/**
- * Moves a single object to the CPU read, and possibly write domain.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
-{
- struct drm_device *dev = obj->dev;
- int ret;
-
-	i915_gem_object_flush_gpu_write_domain(obj);
-	/* Wait on any GPU rendering and flushing to occur. */
-	ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
- return ret;
-
- i915_gem_object_flush_gtt_write_domain(obj);
-
- /* If we have a partially-valid cache of the object in the CPU,
- * finish invalidating it and free the per-page flags.
- */
- i915_gem_object_set_to_full_cpu_read_domain(obj);
-
- /* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
- }
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
-	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0);
-
- /* If we're writing through the CPU, then the GPU read domains will
- * need to be invalidated at next use.
- */
- if (write) {
- obj->read_domains &= I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- }
-
- return 0;
-}
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- *	6. Read/written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- uint32_t invalidate_domains = 0;
- uint32_t flush_domains = 0;
-
- DRM_DEBUG("%s: object %p read %08x -> %08x write %08x -> %08x\n",
- __func__, obj,
- obj->read_domains, read_domains,
- obj->write_domain, write_domain);
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (write_domain == 0)
- read_domains |= obj->read_domains;
- else
- obj_priv->dirty = 1;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->write_domain && obj->write_domain != read_domains) {
- flush_domains |= obj->write_domain;
- invalidate_domains |= read_domains & ~obj->write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
- DRM_DEBUG("%s: CPU domain flush %08x invalidate %08x\n",
- __func__, flush_domains, invalidate_domains);
- i915_gem_clflush_object(obj);
- }
-
- if ((write_domain | flush_domains) != 0)
- obj->write_domain = write_domain;
- obj->read_domains = read_domains;
-
- dev->invalidate_domains |= invalidate_domains;
- dev->flush_domains |= flush_domains;
-
- DRM_DEBUG("%s: read %08x write %08x invalidate %08x flush %08x\n",
- __func__,
- obj->read_domains, obj->write_domain,
- dev->invalidate_domains, dev->flush_domains);
-
-}
-
-/**
- * Moves the object from a partially CPU read to a full one.
- *
- * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
- * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
- */
-static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (!obj_priv->page_cpu_valid)
- return;
-
- /* If we're partially in the CPU read domain, finish moving it in.
- */
- if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
- int i;
-
- for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
- if (obj_priv->page_cpu_valid[i])
- continue;
- drm_clflush_pages(obj_priv->page_list + i, 1);
- }
- drm_agp_chipset_flush(dev);
- }
-
- /* Free the page_cpu_valid mappings which are now stale, whether
- * or not we've got I915_GEM_DOMAIN_CPU.
- */
- drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
- DRM_MEM_DRIVER);
- obj_priv->page_cpu_valid = NULL;
-}
-
-/**
- * Set the CPU read domain on a range of the object.
- *
- * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
- * not entirely valid. The page_cpu_valid member of the object flags which
- * pages have been flushed, and will be respected by
- * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
- * of the whole object.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
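- *
- * Worked example: offset = 0x1000 and size = 0x2000 with 4 KB pages
- * covers pages 1 and 2 (offset / PAGE_SIZE through
- * (offset + size - 1) / PAGE_SIZE); only those pages are clflushed
- * and marked valid in page_cpu_valid.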
- */
-static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
- uint64_t offset, uint64_t size)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int i, ret;
-
- if (offset == 0 && size == obj->size)
- return i915_gem_object_set_to_cpu_domain(obj, 0);
-
- i915_gem_object_flush_gpu_write_domain(obj);
- /* Wait on any GPU rendering and flushing to occur. */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret != 0)
- return ret;
- i915_gem_object_flush_gtt_write_domain(obj);
-
- /* If we're already fully in the CPU read domain, we're done. */
- if (obj_priv->page_cpu_valid == NULL &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
- return 0;
-
- /* Otherwise, create/clear the per-page CPU read domain flag if we're
- * newly adding I915_GEM_DOMAIN_CPU
- */
- if (obj_priv->page_cpu_valid == NULL) {
- obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
- DRM_MEM_DRIVER);
- if (obj_priv->page_cpu_valid == NULL)
- return ENOMEM;
- } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
- (void) memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
-
- /* Flush the cache on any pages that are still invalid from the CPU's
- * perspective.
- */
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
- i++) {
- if (obj_priv->page_cpu_valid[i])
- continue;
-
- drm_clflush_pages(obj_priv->page_list + i, 1);
- obj_priv->page_cpu_valid[i] = 1;
- }
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
-	ASSERT((obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0);
-
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
-
- return 0;
-}
-
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
-static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry)
-{
- struct drm_i915_gem_relocation_entry reloc;
- struct drm_i915_gem_relocation_entry __user *relocs;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int i, ret;
-
- /* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
- if (ret) {
- DRM_ERROR("failed to pin");
- return ret;
- }
- entry->offset = obj_priv->gtt_offset;
-
- relocs = (struct drm_i915_gem_relocation_entry __user *)
- (uintptr_t) entry->relocs_ptr;
- /* Apply the relocations, using the GTT aperture to avoid cache
- * flushing requirements.
- */
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_gem_object *target_obj;
- struct drm_i915_gem_object *target_obj_priv;
- uint32_t reloc_val, reloc_offset, *reloc_entry;
-
- ret = DRM_COPY_FROM_USER(&reloc, relocs + i, sizeof(reloc));
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- DRM_ERROR("failed to copy from user");
- return ret;
- }
-
- target_obj = drm_gem_object_lookup(file_priv,
- reloc.target_handle);
- if (target_obj == NULL) {
- i915_gem_object_unpin(obj);
- return EBADF;
- }
- target_obj_priv = target_obj->driver_private;
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_obj_priv->gtt_space == NULL) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc.target_handle);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return EINVAL;
- }
-
- if (reloc.offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return EINVAL;
- }
- if (reloc.offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return EINVAL;
- }
-
- if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
- reloc.read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return EINVAL;
- }
-
- if (reloc.write_domain && target_obj->pending_write_domain &&
- reloc.write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.write_domain,
- target_obj->pending_write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return EINVAL;
- }
- DRM_DEBUG("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc.offset,
- (int) reloc.target_handle,
- (int) reloc.read_domains,
- (int) reloc.write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc.presumed_offset,
- reloc.delta);
-
- target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain |= reloc.write_domain;
-
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
- drm_gem_object_unreference(target_obj);
- continue;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
- if (ret != 0) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return EINVAL;
- }
-
-		/* Map the page containing the relocation we're going to
-		 * perform.
-		 */
-		uint32_t reloc_base = (uint32_t)(reloc.offset &
-		    ~(PAGE_SIZE - 1));
-		reloc_offset = reloc.offset & (PAGE_SIZE - 1);
-		reloc_entry = (uint32_t *)(uintptr_t)
-		    (obj_priv->page_list[reloc_base / PAGE_SIZE] +
-		    reloc_offset);
-		reloc_val = target_obj_priv->gtt_offset + reloc.delta;
-		*reloc_entry = reloc_val;
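-
-		/*
-		 * Worked example: reloc.offset = 0x1004 patches the
-		 * dword at byte 4 of page 1 of this object with
-		 * target_obj_priv->gtt_offset + reloc.delta, so the GPU
-		 * sees the target's actual GTT address there.
-		 */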
-
- /* Write the updated presumed offset for this entry back out
- * to the user.
- */
- reloc.presumed_offset = target_obj_priv->gtt_offset;
- ret = DRM_COPY_TO_USER(relocs + i, &reloc, sizeof(reloc));
- if (ret != 0) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- DRM_ERROR("%s: Failed to copy to user ret %d", __func__, ret);
- return ret;
- }
-
- drm_gem_object_unreference(target_obj);
- }
-
- return 0;
-}
-
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer *exec,
- uint64_t exec_offset)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
- (uintptr_t) exec->cliprects_ptr;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint64_t exec_start, exec_len;
- RING_LOCALS;
-
- exec_start = exec_offset + exec->batch_start_offset;
- exec_len = exec->batch_len;
-
-	if ((exec_start | exec_len) & 0x7) {
-		DRM_ERROR("batch start/length not 8-byte aligned\n");
-		return EINVAL;
-	}
-
-	if (!exec_start) {
-		DRM_ERROR("batch start address is zero");
-		return EINVAL;
-	}
-
- count = nbox ? nbox : 1;
-
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, boxes, i,
- exec->DR1, exec->DR4);
- if (ret) {
-				DRM_ERROR("i915_emit_box %d DR1 0x%lx "
-				    "DR4 0x%lx", ret, exec->DR1, exec->DR4);
- return ret;
- }
- }
- if (IS_I830(dev) || IS_845G(dev)) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- OUT_RING(exec_start + exec_len - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6) |
- (3 << 9) |
- MI_BATCH_NON_SECURE_I965);
- OUT_RING(exec_start);
-
- } else {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6));
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- }
- ADVANCE_LP_RING();
- }
- }
- /* XXX breadcrumb */
- return 0;
-}
-
-/* Throttle our rendering by waiting until the ring has completed our requests
- * emitted over 20 msec ago.
- *
- * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
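- *
- * Concretely, each call waits on the seqno recorded at the previous
- * throttle call (last_gem_throttle_seqno below), so a client that
- * throttles once per frame stays at most one frame ahead of the GPU.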
- */
-static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
- int ret = 0;
- uint32_t seqno;
-
- spin_lock(&dev->struct_mutex);
- seqno = i915_file_priv->mm.last_gem_throttle_seqno;
- i915_file_priv->mm.last_gem_throttle_seqno =
- i915_file_priv->mm.last_gem_seqno;
- if (seqno) {
- ret = i915_wait_request(dev, seqno);
- if (ret != 0)
-			DRM_ERROR("%s: i915_wait_request seqno %d now %d\n",
-			    __func__, seqno, i915_get_gem_seqno(dev));
- }
- spin_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/*ARGSUSED*/
-int
-i915_gem_execbuffer(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = fpriv->driver_priv;
- struct drm_i915_gem_execbuffer args;
- struct drm_i915_gem_exec_object *exec_list = NULL;
- struct drm_gem_object **object_list = NULL;
- struct drm_gem_object *batch_obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0, i, pinned = 0;
- uint64_t exec_offset;
- uint32_t seqno, flush_domains;
- int pin_tries;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_execbuffer __user *) data, sizeof(args));
-
- DRM_DEBUG("buffer_count %d len %x\n", args.buffer_count, args.batch_len);
-
- if (args.buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args.buffer_count);
- return EINVAL;
- }
- /* Copy in the exec list from userland */
- exec_list = drm_calloc(sizeof(*exec_list), args.buffer_count,
- DRM_MEM_DRIVER);
- object_list = drm_calloc(sizeof(*object_list), args.buffer_count,
- DRM_MEM_DRIVER);
- if (exec_list == NULL || object_list == NULL) {
- DRM_ERROR("Failed to allocate exec or object list "
- "for %d buffers\n",
- args.buffer_count);
- ret = ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = DRM_COPY_FROM_USER(exec_list,
- (struct drm_i915_gem_exec_object __user *)
- (uintptr_t) args.buffers_ptr,
- sizeof(*exec_list) * args.buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args.buffer_count, ret);
- goto pre_mutex_err;
- }
- spin_lock(&dev->struct_mutex);
-
- if (dev_priv->mm.wedged) {
- DRM_ERROR("Execbuf while wedged\n");
- spin_unlock(&dev->struct_mutex);
- return EIO;
- }
-
- if (dev_priv->mm.suspended) {
- DRM_ERROR("Execbuf while VT-switched.\n");
- spin_unlock(&dev->struct_mutex);
- return EBUSY;
- }
-
- /* Look up object handles */
- for (i = 0; i < args.buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(fpriv,
- exec_list[i].handle);
- if (object_list[i] == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
- exec_list[i].handle, i);
- ret = EBADF;
- goto err;
- }
- obj_priv = object_list[i]->driver_private;
- if (obj_priv->in_execbuffer) {
-			DRM_ERROR("Object[%d] (%d) %p appears more than once "
-			    "in object list (buffer_count %d)\n",
-			    i, object_list[i]->name, object_list[i],
-			    args.buffer_count);
-
- ret = EBADF;
- goto err;
- }
-
- obj_priv->in_execbuffer = 1;
- }
-
- /* Pin and relocate */
- for (pin_tries = 0; ; pin_tries++) {
- ret = 0;
- for (i = 0; i < args.buffer_count; i++) {
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- fpriv,
- &exec_list[i]);
- if (ret) {
- DRM_ERROR("Not all object pinned");
- break;
- }
- pinned = i + 1;
- }
- /* success */
-		if (ret == 0) {
-			DRM_DEBUG("gem_execbuffer pin_relocate success");
-			break;
-		}
- /* error other than GTT full, or we've already tried again */
- if (ret != ENOMEM || pin_tries >= 1) {
- if (ret != ERESTART)
- DRM_ERROR("Failed to pin buffers %d\n", ret);
- goto err;
- }
-
- /* unpin all of our buffers */
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
- pinned = 0;
-
- /* evict everyone we can from the aperture */
- ret = i915_gem_evict_everything(dev);
- if (ret)
- goto err;
- }
-
- /* Set the pending read domains for the batch buffer to COMMAND */
- batch_obj = object_list[args.buffer_count-1];
- batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
- batch_obj->pending_write_domain = 0;
-
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * GTT.
-	 */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
-
- for (i = 0; i < args.buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- /* Compute new gpu domains and update invalidate/flush */
- i915_gem_object_set_to_gpu_domain(obj,
- obj->pending_read_domains,
- obj->pending_write_domain);
- }
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-		DRM_DEBUG("%s: invalidate_domains %08x flush_domains %08x\n",
-		    __func__,
-		    dev->invalidate_domains,
-		    dev->flush_domains);
-		i915_gem_flush(dev,
-		    dev->invalidate_domains,
-		    dev->flush_domains);
-		if (dev->flush_domains)
-			(void) i915_add_request(dev, dev->flush_domains);
-	}
-
- for (i = 0; i < args.buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- obj->write_domain = obj->pending_write_domain;
- }
-
- exec_offset = exec_list[args.buffer_count - 1].offset;
-
- /* Exec the batchbuffer */
- ret = i915_dispatch_gem_execbuffer(dev, &args, exec_offset);
- if (ret) {
- DRM_ERROR("dispatch failed %d\n", ret);
- goto err;
- }
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires
- */
- flush_domains = i915_retire_commands(dev);
-
- /*
- * Get a seqno representing the execution of the current buffer,
- * which we can wait on. We would like to mitigate these interrupts,
- * likely by only creating seqnos occasionally (so that we have
- * *some* interrupts representing completion of buffers that we can
- * wait on when trying to clear up gtt space).
- */
- seqno = i915_add_request(dev, flush_domains);
-	ASSERT(seqno != 0);
- i915_file_priv->mm.last_gem_seqno = seqno;
- for (i = 0; i < args.buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- i915_gem_object_move_to_active(obj, seqno);
- DRM_DEBUG("%s: move to exec list %p\n", __func__, obj);
- }
-
-err:
- if (object_list != NULL) {
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
-
-		for (i = 0; i < args.buffer_count; i++) {
-			if (object_list[i]) {
-				obj_priv = object_list[i]->driver_private;
-				obj_priv->in_execbuffer = 0;
-				drm_gem_object_unreference(object_list[i]);
-			}
-		}
- }
- spin_unlock(&dev->struct_mutex);
-
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
-		ret = DRM_COPY_TO_USER(
-		    (struct drm_i915_gem_exec_object __user *)
-		    (uintptr_t) args.buffers_ptr,
-		    exec_list,
-		    sizeof (*exec_list) * args.buffer_count);
- if (ret)
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args.buffer_count, ret);
- }
-
-pre_mutex_err:
- drm_free(object_list, sizeof(*object_list) * args.buffer_count,
- DRM_MEM_DRIVER);
- drm_free(exec_list, sizeof(*exec_list) * args.buffer_count,
- DRM_MEM_DRIVER);
-
- return ret;
-}
-
-int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret;
-
- if (obj_priv->gtt_space == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment);
- if (ret != 0) {
- DRM_ERROR("Failure to bind: %d", ret);
- return ret;
- }
- }
- obj_priv->pin_count++;
-
- /* If the object is not active and not pending a flush,
- * remove it from the inactive list
- */
- if (obj_priv->pin_count == 1) {
- atomic_inc(&dev->pin_count);
- atomic_add(obj->size, &dev->pin_memory);
- if (!obj_priv->active &&
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) == 0 &&
- !list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
- }
- return 0;
-}
-
-void
-i915_gem_object_unpin(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- obj_priv->pin_count--;
-	ASSERT(obj_priv->pin_count >= 0);
-	ASSERT(obj_priv->gtt_space != NULL);
-
- /* If the object is no longer pinned, and is
- * neither active nor being flushed, then stick it on
- * the inactive list
- */
- if (obj_priv->pin_count == 0) {
- if (!obj_priv->active &&
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) == 0)
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.inactive_list, (caddr_t)obj_priv);
- atomic_dec(&dev->pin_count);
- atomic_sub(obj->size, &dev->pin_memory);
- }
-}
-
-/*ARGSUSED*/
-int
-i915_gem_pin_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_pin args;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_pin __user *) data, sizeof(args));
-
- spin_lock(&dev->struct_mutex);
-
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
- args.handle);
- spin_unlock(&dev->struct_mutex);
- return EBADF;
- }
- DRM_DEBUG("i915_gem_pin_ioctl obj->name %d", obj->name);
- obj_priv = obj->driver_private;
-
- if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != fpriv) {
- DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
- args.handle);
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- return EINVAL;
- }
-
-	obj_priv->user_pin_count++;
-	obj_priv->pin_filp = fpriv;
-	if (obj_priv->user_pin_count == 1) {
-		ret = i915_gem_object_pin(obj, args.alignment);
-		if (ret != 0) {
-			/* Undo the bookkeeping from above on failure. */
-			obj_priv->user_pin_count--;
-			obj_priv->pin_filp = NULL;
-			drm_gem_object_unreference(obj);
-			spin_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	}
-
- /* XXX - flush the CPU caches for pinned objects
- * as the X server doesn't manage domains yet
- */
- i915_gem_object_flush_cpu_write_domain(obj);
- args.offset = obj_priv->gtt_offset;
-
-	ret = DRM_COPY_TO_USER((struct drm_i915_gem_pin __user *) data,
-	    &args, sizeof (args));
-	if (ret != 0)
-		DRM_ERROR("i915_gem_pin_ioctl: copyout failed %d", ret);
-
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*ARGSUSED*/
-int
-i915_gem_unpin_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_pin args;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_pin __user *) data, sizeof(args));
-
- spin_lock(&dev->struct_mutex);
-
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
- args.handle);
- spin_unlock(&dev->struct_mutex);
- return EBADF;
- }
- obj_priv = obj->driver_private;
- DRM_DEBUG("i915_gem_unpin_ioctl, obj->name %d", obj->name);
- if (obj_priv->pin_filp != fpriv) {
-		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
- args.handle);
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- return EINVAL;
- }
- obj_priv->user_pin_count--;
- if (obj_priv->user_pin_count == 0) {
- obj_priv->pin_filp = NULL;
- i915_gem_object_unpin(obj);
- }
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/*ARGSUSED*/
-int
-i915_gem_busy_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_busy args;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_busy __user *) data, sizeof(args));
-
- spin_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
- args.handle);
- spin_unlock(&dev->struct_mutex);
- return EBADF;
- }
-
- obj_priv = obj->driver_private;
- /* Don't count being on the flushing list against the object being
- * done. Otherwise, a buffer left on the flushing list but not getting
- * flushed (because nobody's flushing that domain) won't ever return
- * unbusy and get reused by libdrm's bo cache. The other expected
- * consumer of this interface, OpenGL's occlusion queries, also specs
- * that the objects get unbusy "eventually" without any interference.
- */
- args.busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
-	DRM_DEBUG("i915_gem_busy_ioctl obj->name %d busy %d",
-	    obj->name, args.busy);
-
-	ret = DRM_COPY_TO_USER((struct drm_i915_gem_busy __user *) data,
-	    &args, sizeof (args));
-	if (ret != 0)
-		DRM_ERROR("i915_gem_busy_ioctl: copyout failed %d", ret);
-
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/*ARGSUSED*/
-int
-i915_gem_throttle_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- return i915_gem_ring_throttle(dev, fpriv);
-}
-
-static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
-{
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	caddr_t va;
-	pgcnt_t np;
-	long i;
-
-	if (obj_priv->page_list)
-		return 0;
-	np = btop(obj->size);
-
-	obj_priv->page_list = kmem_zalloc(np * sizeof (caddr_t), KM_SLEEP);
-	if (obj_priv->page_list == NULL) {
-		DRM_ERROR("Failed to allocate page list\n");
-		return ENOMEM;
-	}
-
-	/* The object is contiguously mapped at kaddr; record each page. */
-	for (i = 0, va = obj->kaddr; i < np; i++, va += PAGESIZE) {
-		obj_priv->page_list[i] = va;
-	}
-	return 0;
-}
-
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
- if (obj_priv == NULL)
- return ENOMEM;
-
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
-
- obj->driver_private = obj_priv;
- obj_priv->obj = obj;
- INIT_LIST_HEAD(&obj_priv->list);
- return 0;
-}
-
-void i915_gem_free_object(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- while (obj_priv->pin_count > 0)
- i915_gem_object_unpin(obj);
-
-	DRM_DEBUG("%s: obj %d", __func__, obj->name);
-
- (void) i915_gem_object_unbind(obj, 1);
- if (obj_priv->page_cpu_valid != NULL)
-		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-		    DRM_MEM_DRIVER);
- drm_free(obj->driver_private, sizeof(*obj_priv), DRM_MEM_DRIVER);
-}
-
-/** Unbinds all objects that are on the given buffer list. */
-static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head,
-    uint32_t type)
-{
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- while (!list_empty(head)) {
- obj_priv = list_entry(head->next,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
-
-		/*
-		 * The caller holds dev->struct_mutex and drops it on
-		 * error, so no unlock is needed on these paths.
-		 */
-		if (obj_priv->pin_count != 0) {
-			DRM_ERROR("Pinned object in unbind list\n");
-			return EINVAL;
-		}
-		DRM_DEBUG("%s: obj %d type %d", __func__, obj->name, type);
-		ret = i915_gem_object_unbind(obj, type);
-		if (ret != 0) {
-			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
-			    ret);
-			return ret;
-		}
- }
-
- return 0;
-}
-
-static int
-i915_gem_idle(struct drm_device *dev, uint32_t type)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno, cur_seqno, last_seqno;
- int stuck, ret;
-
- spin_lock(&dev->struct_mutex);
-
- if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
- spin_unlock(&dev->struct_mutex);
- return 0;
- }
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- */
- dev_priv->mm.suspended = 1;
-
- /* Cancel the retire work handler, wait for it to finish if running
- */
- if (worktimer_id != NULL) {
- (void) untimeout(worktimer_id);
- worktimer_id = NULL;
- }
-
- i915_kernel_lost_context(dev);
-
- /* Flush the GPU along with all non-CPU write domains
- */
- i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
- ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT));
- if (seqno == 0) {
- spin_unlock(&dev->struct_mutex);
- return ENOMEM;
- }
-
- dev_priv->mm.waiting_gem_seqno = seqno;
- last_seqno = 0;
- stuck = 0;
- for (;;) {
- cur_seqno = i915_get_gem_seqno(dev);
- if (i915_seqno_passed(cur_seqno, seqno))
- break;
- if (last_seqno == cur_seqno) {
- if (stuck++ > 100) {
- DRM_ERROR("hardware wedged\n");
- dev_priv->mm.wedged = 1;
- DRM_WAKEUP(&dev_priv->irq_queue);
- break;
- }
- }
- DRM_UDELAY(10);
- last_seqno = cur_seqno;
- }
- dev_priv->mm.waiting_gem_seqno = 0;
-
- i915_gem_retire_requests(dev);
-
- /* Empty the active and flushing lists to inactive. If there's
- * anything left at this point, it means that we're wedged and
- * nothing good's going to happen by leaving them there. So strip
- * the GPU domains and just stuff them onto inactive.
- */
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = list_entry(dev_priv->mm.active_list.next,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
- }
-
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = list_entry(dev_priv->mm.flushing_list.next,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
- }
-
- /* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list, type);
- ASSERT(list_empty(&dev_priv->mm.inactive_list));
- if (ret) {
- spin_unlock(&dev->struct_mutex);
- return ret;
- }
-
- i915_gem_cleanup_ringbuffer(dev);
- spin_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int
-i915_gem_init_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- /* If we need a physical address for the status page, it's already
- * initialized at driver load time.
- */
- if (!I915_NEED_GFX_HWS(dev))
- return 0;
-
-
- obj = drm_gem_object_alloc(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate status page\n");
- return ENOMEM;
- }
-
- obj_priv = obj->driver_private;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- return ret;
- }
-
- dev_priv->status_gfx_addr = obj_priv->gtt_offset;
- dev_priv->hws_map.offset = dev->agp->agp_info.agpi_aperbase + obj_priv->gtt_offset;
- dev_priv->hws_map.size = 4096;
- dev_priv->hws_map.type = 0;
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
- DRM_ERROR("Failed to map status page.\n");
- (void) memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- drm_gem_object_unreference(obj);
- return EINVAL;
- }
-
- dev_priv->hws_obj = obj;
-
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
-
- (void) memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- (void) I915_READ(HWS_PGA); /* posting read */
- DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-
- return 0;
-}
-
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
-
- if (dev_priv->hws_obj == NULL)
- return;
-
- obj = dev_priv->hws_obj;
-
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- dev_priv->hws_obj = NULL;
-
- (void) memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- dev_priv->hw_status_page = NULL;
-
- /* Write high address into HWS_PGA when disabling. */
- I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
-int
-i915_gem_init_ringbuffer(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
- u32 head;
-
- ret = i915_gem_init_hws(dev);
- if (ret != 0)
- return ret;
- obj = drm_gem_object_alloc(dev, 128 * 1024);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- i915_gem_cleanup_hws(dev);
- return ENOMEM;
- }
-
- obj_priv = obj->driver_private;
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return ret;
- }
-
- /* Set up the kernel mapping for the ring. */
- dev_priv->ring.Size = obj->size;
- dev_priv->ring.tail_mask = obj->size - 1;
-
- dev_priv->ring.map.offset = dev->agp->agp_info.agpi_aperbase + obj_priv->gtt_offset;
- dev_priv->ring.map.size = obj->size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
- DRM_ERROR("Failed to map ringbuffer.\n");
- (void) memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
- drm_gem_object_unreference(obj);
- i915_gem_cleanup_hws(dev);
- return EINVAL;
- }
-
- dev_priv->ring.ring_obj = obj;
-
- dev_priv->ring.virtual_start = (u8 *) dev_priv->ring.map.handle;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_HEAD, 0);
- I915_WRITE(PRB0_TAIL, 0);
-
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* G45 ring initialization fails to reset head to zero */
- if (head != 0) {
- DRM_ERROR("Ring head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- I915_WRITE(PRB0_HEAD, 0);
-
- DRM_ERROR("Ring head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- }
-
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
-
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
- /* If the head is still not zero, the ring is dead */
- if (head != 0) {
- DRM_ERROR("Ring initialization failed "
- "ctl %08x head %08x tail %08x start %08x\n",
- I915_READ(PRB0_CTL),
- I915_READ(PRB0_HEAD),
- I915_READ(PRB0_TAIL),
- I915_READ(PRB0_START));
- return EIO;
- }
-
- /* Update our cache of the ring state */
- i915_kernel_lost_context(dev);
-
- return 0;
-}
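
Because the ring object is a power-of-two size (128 KB here),
tail_mask = size - 1 lets the driver wrap the tail pointer with a
single AND instead of a compare-and-reset. A hedged sketch of that
invariant (the helper name is illustrative):

	/*
	 * Advance a ring tail by one dword, wrapping at the end of the
	 * buffer. Correct only because the ring size is a power of two,
	 * which makes (size - 1) an all-ones mask below the size bit.
	 */
	static void
	ring_advance(uint32_t *tail, uint32_t tail_mask)
	{
		*tail = (*tail + 4) & tail_mask;	/* 4 bytes per dword */
	}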
-
-static void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (dev_priv->ring.ring_obj == NULL)
- return;
-
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
- i915_gem_object_unpin(dev_priv->ring.ring_obj);
- drm_gem_object_unreference(dev_priv->ring.ring_obj);
- dev_priv->ring.ring_obj = NULL;
- (void) memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
- i915_gem_cleanup_hws(dev);
-}
-
-/*ARGSUSED*/
-int
-i915_gem_entervt_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- if (dev_priv->mm.wedged) {
- DRM_ERROR("Reenabling wedged hardware, good luck\n");
- dev_priv->mm.wedged = 0;
- }
- /* Set up the kernel mapping for the ring. */
- dev_priv->mm.gtt_mapping.offset = dev->agp->agp_info.agpi_aperbase;
- dev_priv->mm.gtt_mapping.size = dev->agp->agp_info.agpi_apersize;
- dev_priv->mm.gtt_mapping.type = 0;
- dev_priv->mm.gtt_mapping.flags = 0;
- dev_priv->mm.gtt_mapping.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->mm.gtt_mapping, dev);
-
- spin_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
- ret = i915_gem_init_ringbuffer(dev);
- if (ret != 0)
- return ret;
-
- spin_unlock(&dev->struct_mutex);
-
- (void) drm_irq_install(dev);
-
- return 0;
-}
-
-/*ARGSUSED*/
-int
-i915_gem_leavevt_ioctl(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- ret = i915_gem_idle(dev, 0);
- (void) drm_irq_uninstall(dev);
-
- drm_core_ioremapfree(&dev_priv->mm.gtt_mapping, dev);
- return ret;
-}
-
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- ret = i915_gem_idle(dev, 1);
- if (ret)
- DRM_ERROR("failed to idle hardware: %d\n", ret);
-
- drm_mm_clean_ml(&dev_priv->mm.gtt_space);
-}
-
-void
-i915_gem_load(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
- dev_priv->mm.next_gem_seqno = 1;
-
- i915_gem_detect_bit_6_swizzle(dev);
-
-}
-
diff --git a/usr/src/uts/intel/io/drm/i915_gem_debug.c b/usr/src/uts/intel/io/drm/i915_gem_debug.c
deleted file mode 100644
index 08580bc..0000000
--- a/usr/src/uts/intel/io/drm/i915_gem_debug.c
+++ /dev/null
@@ -1,1108 +0,0 @@
-/* BEGIN CSTYLED */
-
-/*
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Keith Packard <keithp@keithp.com>
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#define BUFFER_FAIL(_count, _len, _name) { \
- DRM_ERROR("Buffer size too small in %s (%d < %d)\n", \
- (_name), (_count), (_len)); \
- (*failures)++; \
- return count; \
-}
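
Note that BUFFER_FAIL expands to a return statement, so every use below
exits the enclosing decode function early with the count of dwords
consumed so far. A sketch of one expansion (the command name is chosen
for the example):

	/* BUFFER_FAIL(count, len, "MI_FLUSH") expands roughly to: */
	DRM_ERROR("Buffer size too small in %s (%d < %d)\n",
	    "MI_FLUSH", count, len);
	(*failures)++;
	return count;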
-
-
-static uint32_t saved_s2 = 0, saved_s4 = 0;
-static char saved_s2_set = 0, saved_s4_set = 0;
-
-static void
-instr_out(uint32_t *data, uint32_t hw_offset, unsigned int index,
- const char *fmt, ...)
-{
-	va_list ap;
-
-	DRM_ERROR("0x%08x: 0x%08x:%s ", hw_offset + index * 4, data[index],
-	    index == 0 ? "" : " ");
-
-	va_start(ap, fmt);
-	vcmn_err(CE_WARN, fmt, ap);
-	va_end(ap);
-}
-
-static int
-decode_mi(uint32_t *data, int count, uint32_t hw_offset, int *failures)
-{
- unsigned int opcode;
-
- struct {
- uint32_t opcode;
- int min_len;
- int max_len;
- char *name;
- } opcodes_mi[] = {
- { 0x08, 1, 1, "MI_ARB_ON_OFF" },
- { 0x0a, 1, 1, "MI_BATCH_BUFFER_END" },
- { 0x31, 2, 2, "MI_BATCH_BUFFER_START" },
- { 0x14, 3, 3, "MI_DISPLAY_BUFFER_INFO" },
- { 0x04, 1, 1, "MI_FLUSH" },
- { 0x22, 3, 3, "MI_LOAD_REGISTER_IMM" },
- { 0x13, 2, 2, "MI_LOAD_SCAN_LINES_EXCL" },
- { 0x12, 2, 2, "MI_LOAD_SCAN_LINES_INCL" },
- { 0x00, 1, 1, "MI_NOOP" },
- { 0x11, 2, 2, "MI_OVERLAY_FLIP" },
- { 0x07, 1, 1, "MI_REPORT_HEAD" },
- { 0x18, 2, 2, "MI_SET_CONTEXT" },
- { 0x20, 3, 4, "MI_STORE_DATA_IMM" },
- { 0x21, 3, 4, "MI_STORE_DATA_INDEX" },
- { 0x24, 3, 3, "MI_STORE_REGISTER_MEM" },
- { 0x02, 1, 1, "MI_USER_INTERRUPT" },
- { 0x03, 1, 1, "MI_WAIT_FOR_EVENT" },
- };
-
-
- for (opcode = 0; opcode < sizeof(opcodes_mi) / sizeof(opcodes_mi[0]);
- opcode++) {
- if ((data[0] & 0x1f800000) >> 23 == opcodes_mi[opcode].opcode) {
- unsigned int len = 1, i;
-
- instr_out(data, hw_offset, 0, "%s\n", opcodes_mi[opcode].name);
- if (opcodes_mi[opcode].max_len > 1) {
- len = (data[0] & 0x000000ff) + 2;
- if (len < opcodes_mi[opcode].min_len ||
- len > opcodes_mi[opcode].max_len)
- {
- DRM_ERROR("Bad length in %s\n",
- opcodes_mi[opcode].name);
- }
- }
-
- for (i = 1; i < len; i++) {
- if (i >= count)
- BUFFER_FAIL(count, len, opcodes_mi[opcode].name);
- instr_out(data, hw_offset, i, "dword %d\n", i);
- }
-
- return len;
- }
- }
-
- instr_out(data, hw_offset, 0, "MI UNKNOWN\n");
- (*failures)++;
- return 1;
-}
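
The table lookup above keys on bits 28:23 of the header dword. A worked
check of that field extraction (computed by hand, not taken from this
file): MI_BATCH_BUFFER_END has opcode 0x0a, so a raw header dword of
0x05000000 decodes to it.

	#include <assert.h>

	int
	main(void)
	{
		/* MI opcodes live in bits 28:23, so 0x0a << 23 == 0x05000000. */
		assert(((0x05000000u & 0x1f800000u) >> 23) == 0x0a);
		return 0;
	}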
-
-static int
-decode_2d(uint32_t *data, int count, uint32_t hw_offset, int *failures)
-{
- unsigned int opcode, len;
- char *format = NULL;
-
- struct {
- uint32_t opcode;
- int min_len;
- int max_len;
- char *name;
- } opcodes_2d[] = {
- { 0x40, 5, 5, "COLOR_BLT" },
- { 0x43, 6, 6, "SRC_COPY_BLT" },
- { 0x01, 8, 8, "XY_SETUP_BLT" },
- { 0x11, 9, 9, "XY_SETUP_MONO_PATTERN_SL_BLT" },
- { 0x03, 3, 3, "XY_SETUP_CLIP_BLT" },
- { 0x24, 2, 2, "XY_PIXEL_BLT" },
- { 0x25, 3, 3, "XY_SCANLINES_BLT" },
-	{ 0x26, 4, 4, "XY_TEXT_BLT" },
- { 0x31, 5, 134, "XY_TEXT_IMMEDIATE_BLT" },
- { 0x50, 6, 6, "XY_COLOR_BLT" },
- { 0x51, 6, 6, "XY_PAT_BLT" },
- { 0x76, 8, 8, "XY_PAT_CHROMA_BLT" },
- { 0x72, 7, 135, "XY_PAT_BLT_IMMEDIATE" },
- { 0x77, 9, 137, "XY_PAT_CHROMA_BLT_IMMEDIATE" },
- { 0x52, 9, 9, "XY_MONO_PAT_BLT" },
- { 0x59, 7, 7, "XY_MONO_PAT_FIXED_BLT" },
- { 0x53, 8, 8, "XY_SRC_COPY_BLT" },
- { 0x54, 8, 8, "XY_MONO_SRC_COPY_BLT" },
- { 0x71, 9, 137, "XY_MONO_SRC_COPY_IMMEDIATE_BLT" },
- { 0x55, 9, 9, "XY_FULL_BLT" },
- { 0x55, 9, 137, "XY_FULL_IMMEDIATE_PATTERN_BLT" },
- { 0x56, 9, 9, "XY_FULL_MONO_SRC_BLT" },
- { 0x75, 10, 138, "XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT" },
- { 0x57, 12, 12, "XY_FULL_MONO_PATTERN_BLT" },
- { 0x58, 12, 12, "XY_FULL_MONO_PATTERN_MONO_SRC_BLT" },
- };
-
- switch ((data[0] & 0x1fc00000) >> 22) {
- case 0x50:
- instr_out(data, hw_offset, 0,
- "XY_COLOR_BLT (rgb %sabled, alpha %sabled, dst tile %d)\n",
- (data[0] & (1 << 20)) ? "en" : "dis",
- (data[0] & (1 << 21)) ? "en" : "dis",
- (data[0] >> 11) & 1);
-
- len = (data[0] & 0x000000ff) + 2;
- if (len != 6)
- DRM_ERROR("Bad count in XY_COLOR_BLT\n");
- if (count < 6)
- BUFFER_FAIL(count, len, "XY_COLOR_BLT");
-
- switch ((data[1] >> 24) & 0x3) {
- case 0:
- format="8";
- break;
- case 1:
- format="565";
- break;
- case 2:
- format="1555";
- break;
- case 3:
- format="8888";
- break;
- }
-
- instr_out(data, hw_offset, 1, "format %s, pitch %d, "
- "clipping %sabled\n", format,
- (short)(data[1] & 0xffff),
- data[1] & (1 << 30) ? "en" : "dis");
- instr_out(data, hw_offset, 2, "(%d,%d)\n",
- data[2] & 0xffff, data[2] >> 16);
- instr_out(data, hw_offset, 3, "(%d,%d)\n",
- data[3] & 0xffff, data[3] >> 16);
- instr_out(data, hw_offset, 4, "offset 0x%08x\n", data[4]);
- instr_out(data, hw_offset, 5, "color\n");
- return len;
- case 0x53:
- instr_out(data, hw_offset, 0,
- "XY_SRC_COPY_BLT (rgb %sabled, alpha %sabled, "
- "src tile %d, dst tile %d)\n",
- (data[0] & (1 << 20)) ? "en" : "dis",
- (data[0] & (1 << 21)) ? "en" : "dis",
- (data[0] >> 15) & 1,
- (data[0] >> 11) & 1);
-
- len = (data[0] & 0x000000ff) + 2;
- if (len != 8)
- DRM_ERROR("Bad count in XY_SRC_COPY_BLT\n");
- if (count < 8)
- BUFFER_FAIL(count, len, "XY_SRC_COPY_BLT");
-
- switch ((data[1] >> 24) & 0x3) {
- case 0:
- format="8";
- break;
- case 1:
- format="565";
- break;
- case 2:
- format="1555";
- break;
- case 3:
- format="8888";
- break;
- }
-
- instr_out(data, hw_offset, 1, "format %s, dst pitch %d, "
- "clipping %sabled\n", format,
- (short)(data[1] & 0xffff),
- data[1] & (1 << 30) ? "en" : "dis");
- instr_out(data, hw_offset, 2, "dst (%d,%d)\n",
- data[2] & 0xffff, data[2] >> 16);
- instr_out(data, hw_offset, 3, "dst (%d,%d)\n",
- data[3] & 0xffff, data[3] >> 16);
- instr_out(data, hw_offset, 4, "dst offset 0x%08x\n", data[4]);
- instr_out(data, hw_offset, 5, "src (%d,%d)\n",
- data[5] & 0xffff, data[5] >> 16);
- instr_out(data, hw_offset, 6, "src pitch %d\n",
- (short)(data[6] & 0xffff));
- instr_out(data, hw_offset, 7, "src offset 0x%08x\n", data[7]);
- return len;
- }
-
- for (opcode = 0; opcode < sizeof(opcodes_2d) / sizeof(opcodes_2d[0]);
- opcode++) {
- if ((data[0] & 0x1fc00000) >> 22 == opcodes_2d[opcode].opcode) {
- unsigned int i;
-
- len = 1;
- instr_out(data, hw_offset, 0, "%s\n", opcodes_2d[opcode].name);
- if (opcodes_2d[opcode].max_len > 1) {
- len = (data[0] & 0x000000ff) + 2;
- if (len < opcodes_2d[opcode].min_len ||
- len > opcodes_2d[opcode].max_len)
- {
- DRM_ERROR("Bad count in %s\n", opcodes_2d[opcode].name);
- }
- }
-
- for (i = 1; i < len; i++) {
- if (i >= count)
- BUFFER_FAIL(count, len, opcodes_2d[opcode].name);
- instr_out(data, hw_offset, i, "dword %d\n", i);
- }
-
- return len;
- }
- }
-
- instr_out(data, hw_offset, 0, "2D UNKNOWN\n");
- (*failures)++;
- return 1;
-}
-
-/*ARGSUSED*/
-static int
-decode_3d_1c(uint32_t *data, int count, uint32_t hw_offset, int *failures)
-{
- switch ((data[0] & 0x00f80000) >> 19) {
- case 0x11:
-		instr_out(data, hw_offset, 0, "3DSTATE_DEPTH_SUBRECTANGLE_DISABLE\n");
- return 1;
- case 0x10:
- instr_out(data, hw_offset, 0, "3DSTATE_SCISSOR_ENABLE\n");
- return 1;
- case 0x01:
- instr_out(data, hw_offset, 0, "3DSTATE_MAP_COORD_SET_I830\n");
- return 1;
- case 0x0a:
- instr_out(data, hw_offset, 0, "3DSTATE_MAP_CUBE_I830\n");
- return 1;
- case 0x05:
- instr_out(data, hw_offset, 0, "3DSTATE_MAP_TEX_STREAM_I830\n");
- return 1;
- }
-
- instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
- (*failures)++;
- return 1;
-}
-
-static int
-decode_3d_1d(uint32_t *data, int count, uint32_t hw_offset, int *failures, int i830)
-{
- unsigned int len, i, c, opcode, word, map, sampler, instr;
-
- struct {
- uint32_t opcode;
- int i830_only;
- int min_len;
- int max_len;
- char *name;
- } opcodes_3d_1d[] = {
- { 0x8e, 0, 3, 3, "3DSTATE_BUFFER_INFO" },
- { 0x86, 0, 4, 4, "3DSTATE_CHROMA_KEY" },
- { 0x9c, 0, 1, 1, "3DSTATE_CLEAR_PARAMETERS" },
- { 0x88, 0, 2, 2, "3DSTATE_CONSTANT_BLEND_COLOR" },
- { 0x99, 0, 2, 2, "3DSTATE_DEFAULT_DIFFUSE" },
- { 0x9a, 0, 2, 2, "3DSTATE_DEFAULT_SPECULAR" },
- { 0x98, 0, 2, 2, "3DSTATE_DEFAULT_Z" },
- { 0x97, 0, 2, 2, "3DSTATE_DEPTH_OFFSET_SCALE" },
- { 0x85, 0, 2, 2, "3DSTATE_DEST_BUFFER_VARIABLES" },
- { 0x80, 0, 5, 5, "3DSTATE_DRAWING_RECTANGLE" },
- { 0x8e, 0, 3, 3, "3DSTATE_BUFFER_INFO" },
- { 0x9d, 0, 65, 65, "3DSTATE_FILTER_COEFFICIENTS_4X4" },
- { 0x9e, 0, 4, 4, "3DSTATE_MONO_FILTER" },
- { 0x89, 0, 4, 4, "3DSTATE_FOG_MODE" },
-	{ 0x8f, 0, 2, 16, "3DSTATE_MAP_PALETTE_LOAD_32" },
- { 0x81, 0, 3, 3, "3DSTATE_SCISSOR_RECTANGLE" },
- { 0x83, 0, 2, 2, "3DSTATE_SPAN_STIPPLE" },
- { 0x8c, 1, 2, 2, "3DSTATE_MAP_COORD_TRANSFORM_I830" },
- { 0x8b, 1, 2, 2, "3DSTATE_MAP_VERTEX_TRANSFORM_I830" },
- { 0x8d, 1, 3, 3, "3DSTATE_W_STATE_I830" },
- { 0x01, 1, 2, 2, "3DSTATE_COLOR_FACTOR_I830" },
- { 0x02, 1, 2, 2, "3DSTATE_MAP_COORD_SETBIND_I830" },
- };
-
- switch ((data[0] & 0x00ff0000) >> 16) {
- case 0x07:
- /* This instruction is unusual. A 0 length means just 1 DWORD instead of
- * 2. The 0 length is specified in one place to be unsupported, but
- * stated to be required in another, and 0 length LOAD_INDIRECTs appear
- * to cause no harm at least.
- */
- instr_out(data, hw_offset, 0, "3DSTATE_LOAD_INDIRECT\n");
- len = (data[0] & 0x000000ff) + 1;
- i = 1;
- if (data[0] & (0x01 << 8)) {
- if (i + 2 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
- instr_out(data, hw_offset, i++, "SIS.0\n");
- instr_out(data, hw_offset, i++, "SIS.1\n");
- }
- if (data[0] & (0x02 << 8)) {
- if (i + 1 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
- instr_out(data, hw_offset, i++, "DIS.0\n");
- }
- if (data[0] & (0x04 << 8)) {
- if (i + 2 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
- instr_out(data, hw_offset, i++, "SSB.0\n");
- instr_out(data, hw_offset, i++, "SSB.1\n");
- }
- if (data[0] & (0x08 << 8)) {
- if (i + 2 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
- instr_out(data, hw_offset, i++, "MSB.0\n");
- instr_out(data, hw_offset, i++, "MSB.1\n");
- }
- if (data[0] & (0x10 << 8)) {
- if (i + 2 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
- instr_out(data, hw_offset, i++, "PSP.0\n");
- instr_out(data, hw_offset, i++, "PSP.1\n");
- }
- if (data[0] & (0x20 << 8)) {
- if (i + 2 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
- instr_out(data, hw_offset, i++, "PSC.0\n");
- instr_out(data, hw_offset, i++, "PSC.1\n");
- }
- if (len != i) {
- DRM_ERROR("Bad count in 3DSTATE_LOAD_INDIRECT\n");
- (*failures)++;
- return len;
- }
- return len;
- case 0x04:
- instr_out(data, hw_offset, 0, "3DSTATE_LOAD_STATE_IMMEDIATE_1\n");
- len = (data[0] & 0x0000000f) + 2;
- i = 1;
- for (word = 0; word <= 7; word++) {
- if (data[0] & (1 << (4 + word))) {
- if (i >= count)
- BUFFER_FAIL(count, len, "3DSTATE_LOAD_STATE_IMMEDIATE_1");
-
- /* save vertex state for decode */
- if (word == 2) {
- saved_s2_set = 1;
- saved_s2 = data[i];
- }
- if (word == 4) {
- saved_s4_set = 1;
- saved_s4 = data[i];
- }
-
- instr_out(data, hw_offset, i++, "S%d\n", word);
- }
- }
- if (len != i) {
-		DRM_ERROR("Bad count in 3DSTATE_LOAD_STATE_IMMEDIATE_1\n");
- (*failures)++;
- }
- return len;
- case 0x00:
- instr_out(data, hw_offset, 0, "3DSTATE_MAP_STATE\n");
- len = (data[0] & 0x0000003f) + 2;
-
- i = 1;
- for (map = 0; map <= 15; map++) {
- if (data[1] & (1 << map)) {
- if (i + 3 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_MAP_STATE");
- instr_out(data, hw_offset, i++, "map %d MS2\n", map);
- instr_out(data, hw_offset, i++, "map %d MS3\n", map);
- instr_out(data, hw_offset, i++, "map %d MS4\n", map);
- }
- }
- if (len != i) {
- DRM_ERROR("Bad count in 3DSTATE_MAP_STATE\n");
- (*failures)++;
- return len;
- }
- return len;
- case 0x06:
- instr_out(data, hw_offset, 0, "3DSTATE_PIXEL_SHADER_CONSTANTS\n");
- len = (data[0] & 0x000000ff) + 2;
-
- i = 1;
- for (c = 0; c <= 31; c++) {
- if (data[1] & (1 << c)) {
- if (i + 4 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_PIXEL_SHADER_CONSTANTS");
- instr_out(data, hw_offset, i, "C%d.X = %x float\n",
- c, data[i]);
- i++;
- instr_out(data, hw_offset, i, "C%d.Y = %x float\n",
- c, data[i]);
- i++;
- instr_out(data, hw_offset, i, "C%d.Z = %x float\n",
- c, data[i]);
- i++;
- instr_out(data, hw_offset, i, "C%d.W = %x float\n",
- c, data[i]);
- i++;
- }
- }
- if (len != i) {
-		DRM_ERROR("Bad count in 3DSTATE_PIXEL_SHADER_CONSTANTS\n");
- (*failures)++;
- }
- return len;
- case 0x05:
- instr_out(data, hw_offset, 0, "3DSTATE_PIXEL_SHADER_PROGRAM\n");
- len = (data[0] & 0x000000ff) + 2;
- if ((len - 1) % 3 != 0 || len > 370) {
- DRM_ERROR("Bad count in 3DSTATE_PIXEL_SHADER_PROGRAM\n");
- (*failures)++;
- }
- i = 1;
- for (instr = 0; instr < (len - 1) / 3; instr++) {
- if (i + 3 >= count)
-			BUFFER_FAIL(count, len, "3DSTATE_PIXEL_SHADER_PROGRAM");
- instr_out(data, hw_offset, i++, "PS%03x\n", instr);
- instr_out(data, hw_offset, i++, "PS%03x\n", instr);
- instr_out(data, hw_offset, i++, "PS%03x\n", instr);
- }
- return len;
- case 0x01:
- if (i830)
- break;
- instr_out(data, hw_offset, 0, "3DSTATE_SAMPLER_STATE\n");
- len = (data[0] & 0x0000003f) + 2;
- i = 1;
- for (sampler = 0; sampler <= 15; sampler++) {
- if (data[1] & (1 << sampler)) {
- if (i + 3 >= count)
- BUFFER_FAIL(count, len, "3DSTATE_SAMPLER_STATE");
- instr_out(data, hw_offset, i++, "sampler %d SS2\n",
- sampler);
- instr_out(data, hw_offset, i++, "sampler %d SS3\n",
- sampler);
- instr_out(data, hw_offset, i++, "sampler %d SS4\n",
- sampler);
- }
- }
- if (len != i) {
- DRM_ERROR("Bad count in 3DSTATE_SAMPLER_STATE\n");
- (*failures)++;
- }
- return len;
- }
-
- for (opcode = 0; opcode < sizeof(opcodes_3d_1d) / sizeof(opcodes_3d_1d[0]);
- opcode++)
- {
- if (opcodes_3d_1d[opcode].i830_only && !i830)
- continue;
-
- if (((data[0] & 0x00ff0000) >> 16) == opcodes_3d_1d[opcode].opcode) {
- len = 1;
-
- instr_out(data, hw_offset, 0, "%s\n", opcodes_3d_1d[opcode].name);
- if (opcodes_3d_1d[opcode].max_len > 1) {
- len = (data[0] & 0x0000ffff) + 2;
- if (len < opcodes_3d_1d[opcode].min_len ||
- len > opcodes_3d_1d[opcode].max_len)
- {
- DRM_ERROR("Bad count in %s\n",
- opcodes_3d_1d[opcode].name);
- (*failures)++;
- }
- }
-
- for (i = 1; i < len; i++) {
- if (i >= count)
- BUFFER_FAIL(count, len, opcodes_3d_1d[opcode].name);
- instr_out(data, hw_offset, i, "dword %d\n", i);
- }
-
- return len;
- }
- }
-
- instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
- (*failures)++;
- return 1;
-}
-
-static int
-decode_3d_primitive(uint32_t *data, int count, uint32_t hw_offset,
- int *failures)
-{
- char immediate = (data[0] & (1 << 23)) == 0;
- unsigned int len, i;
- char *primtype;
-
- switch ((data[0] >> 18) & 0xf) {
- case 0x0: primtype = "TRILIST"; break;
- case 0x1: primtype = "TRISTRIP"; break;
- case 0x2: primtype = "TRISTRIP_REVERSE"; break;
- case 0x3: primtype = "TRIFAN"; break;
- case 0x4: primtype = "POLYGON"; break;
- case 0x5: primtype = "LINELIST"; break;
- case 0x6: primtype = "LINESTRIP"; break;
- case 0x7: primtype = "RECTLIST"; break;
- case 0x8: primtype = "POINTLIST"; break;
- case 0x9: primtype = "DIB"; break;
- case 0xa: primtype = "CLEAR_RECT"; break;
- default: primtype = "unknown"; break;
- }
-
- /* XXX: 3DPRIM_DIB not supported */
- if (immediate) {
- len = (data[0] & 0x0003ffff) + 2;
- instr_out(data, hw_offset, 0, "3DPRIMITIVE inline %s\n", primtype);
- if (count < len)
- BUFFER_FAIL(count, len, "3DPRIMITIVE inline");
- if (!saved_s2_set || !saved_s4_set) {
- DRM_ERROR("unknown vertex format\n");
- for (i = 1; i < len; i++) {
- instr_out(data, hw_offset, i,
- " vertex data (%x float)\n",
- data[i]);
- }
- } else {
- unsigned int vertex = 0;
- for (i = 1; i < len;) {
- unsigned int tc;
-
-#define VERTEX_OUT(fmt, ...) { \
- if (i < len) \
- instr_out(data, hw_offset, i, " V%d."fmt"\n", vertex, __VA_ARGS__); \
- else \
- DRM_ERROR(" missing data in V%d\n", vertex); \
- i++; \
-}
-
- VERTEX_OUT("X = %x float", data[i]);
- VERTEX_OUT("Y = %x float", data[i]);
- switch (saved_s4 >> 6 & 0x7) {
- case 0x1:
- VERTEX_OUT("Z = %x float", data[i]);
- break;
- case 0x2:
- VERTEX_OUT("Z = %x float", data[i]);
- VERTEX_OUT("W = %x float", data[i]);
- break;
- case 0x3:
- break;
- case 0x4:
- VERTEX_OUT("W = %x float", data[i]);
- break;
- default:
- DRM_ERROR("bad S4 position mask\n");
- }
-
- if (saved_s4 & (1 << 10)) {
- VERTEX_OUT("color = (A=0x%02x, R=0x%02x, G=0x%02x, "
- "B=0x%02x)",
- data[i] >> 24,
- (data[i] >> 16) & 0xff,
- (data[i] >> 8) & 0xff,
- data[i] & 0xff);
- }
- if (saved_s4 & (1 << 11)) {
- VERTEX_OUT("spec = (A=0x%02x, R=0x%02x, G=0x%02x, "
- "B=0x%02x)",
- data[i] >> 24,
- (data[i] >> 16) & 0xff,
- (data[i] >> 8) & 0xff,
- data[i] & 0xff);
- }
- if (saved_s4 & (1 << 12))
- VERTEX_OUT("width = 0x%08x)", data[i]);
-
- for (tc = 0; tc <= 7; tc++) {
- switch ((saved_s2 >> (tc * 4)) & 0xf) {
- case 0x0:
- VERTEX_OUT("T%d.X = %x float", tc, data[i]);
- VERTEX_OUT("T%d.Y = %x float", tc, data[i]);
- break;
- case 0x1:
- VERTEX_OUT("T%d.X = %x float", tc, data[i]);
- VERTEX_OUT("T%d.Y = %x float", tc, data[i]);
- VERTEX_OUT("T%d.Z = %x float", tc, data[i]);
- break;
- case 0x2:
- VERTEX_OUT("T%d.X = %x float", tc, data[i]);
- VERTEX_OUT("T%d.Y = %x float", tc, data[i]);
- VERTEX_OUT("T%d.Z = %x float", tc, data[i]);
- VERTEX_OUT("T%d.W = %x float", tc, data[i]);
- break;
- case 0x3:
- VERTEX_OUT("T%d.X = %x float", tc, data[i]);
- break;
- case 0x4:
- VERTEX_OUT("T%d.XY = 0x%08x half-float", tc, data[i]);
- break;
- case 0x5:
- VERTEX_OUT("T%d.XY = 0x%08x half-float", tc, data[i]);
- VERTEX_OUT("T%d.ZW = 0x%08x half-float", tc, data[i]);
- break;
- case 0xf:
- break;
- default:
- DRM_ERROR("bad S2.T%d format\n", tc);
- }
- }
- vertex++;
- }
- }
- } else {
- /* indirect vertices */
- len = data[0] & 0x0000ffff; /* index count */
- if (data[0] & (1 << 17)) {
- /* random vertex access */
- if (count < (len + 1) / 2 + 1)
- BUFFER_FAIL(count, (len + 1) / 2 + 1, "3DPRIMITIVE random indirect");
- instr_out(data, hw_offset, 0,
- "3DPRIMITIVE random indirect %s (%d)\n", primtype, len);
- if (len == 0) {
- /* vertex indices continue until 0xffff is found */
- for (i = 1; i < count; i++) {
- if ((data[i] & 0xffff) == 0xffff) {
- instr_out(data, hw_offset, i,
- " indices: (terminator)\n");
- return i;
- } else if ((data[i] >> 16) == 0xffff) {
- instr_out(data, hw_offset, i,
- " indices: 0x%04x, "
- "(terminator)\n",
- data[i] & 0xffff);
- return i;
- } else {
- instr_out(data, hw_offset, i,
- " indices: 0x%04x, 0x%04x\n",
- data[i] & 0xffff, data[i] >> 16);
- }
- }
- DRM_ERROR("3DPRIMITIVE: no terminator found in index buffer\n");
- (*failures)++;
- return count;
- } else {
- /* fixed size vertex index buffer */
- for (i = 0; i < len; i += 2) {
- if (i * 2 == len - 1) {
- instr_out(data, hw_offset, i,
- " indices: 0x%04x\n",
- data[i] & 0xffff);
- } else {
- instr_out(data, hw_offset, i,
- " indices: 0x%04x, 0x%04x\n",
- data[i] & 0xffff, data[i] >> 16);
- }
- }
- }
- return (len + 1) / 2 + 1;
- } else {
- /* sequential vertex access */
- if (count < 2)
- BUFFER_FAIL(count, 2, "3DPRIMITIVE seq indirect");
- instr_out(data, hw_offset, 0,
- "3DPRIMITIVE sequential indirect %s, %d starting from "
- "%d\n", primtype, len, data[1] & 0xffff);
- instr_out(data, hw_offset, 1, " start\n");
- return 2;
- }
- }
-
- return len;
-}
-
-static int
-decode_3d(uint32_t *data, int count, uint32_t hw_offset, int *failures)
-{
- unsigned int opcode;
-
- struct {
- uint32_t opcode;
- int min_len;
- int max_len;
- char *name;
- } opcodes_3d[] = {
- { 0x06, 1, 1, "3DSTATE_ANTI_ALIASING" },
- { 0x08, 1, 1, "3DSTATE_BACKFACE_STENCIL_OPS" },
- { 0x09, 1, 1, "3DSTATE_BACKFACE_STENCIL_MASKS" },
- { 0x16, 1, 1, "3DSTATE_COORD_SET_BINDINGS" },
- { 0x15, 1, 1, "3DSTATE_FOG_COLOR" },
- { 0x0b, 1, 1, "3DSTATE_INDEPENDENT_ALPHA_BLEND" },
- { 0x0d, 1, 1, "3DSTATE_MODES_4" },
- { 0x0c, 1, 1, "3DSTATE_MODES_5" },
- { 0x07, 1, 1, "3DSTATE_RASTERIZATION_RULES" },
- };
-
- switch ((data[0] & 0x1f000000) >> 24) {
- case 0x1f:
- return decode_3d_primitive(data, count, hw_offset, failures);
- case 0x1d:
- return decode_3d_1d(data, count, hw_offset, failures, 0);
- case 0x1c:
- return decode_3d_1c(data, count, hw_offset, failures);
- }
-
- for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
- opcode++) {
- if ((data[0] & 0x1f000000) >> 24 == opcodes_3d[opcode].opcode) {
- unsigned int len = 1, i;
-
- instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
- if (opcodes_3d[opcode].max_len > 1) {
- len = (data[0] & 0xff) + 2;
- if (len < opcodes_3d[opcode].min_len ||
- len > opcodes_3d[opcode].max_len)
- {
- DRM_ERROR("Bad count in %s\n", opcodes_3d[opcode].name);
- }
- }
-
- for (i = 1; i < len; i++) {
- if (i >= count)
- BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
- instr_out(data, hw_offset, i, "dword %d\n", i);
- }
- return len;
- }
- }
-
- instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
- (*failures)++;
- return 1;
-}
-
-static const char *
-get_965_surfacetype(unsigned int surfacetype)
-{
- switch (surfacetype) {
- case 0: return "1D";
- case 1: return "2D";
- case 2: return "3D";
- case 3: return "CUBE";
- case 4: return "BUFFER";
- case 7: return "NULL";
- default: return "unknown";
- }
-}
-
-static const char *
-get_965_depthformat(unsigned int depthformat)
-{
- switch (depthformat) {
- case 0: return "s8_z24float";
- case 1: return "z32float";
- case 2: return "z24s8";
- case 5: return "z16";
- default: return "unknown";
- }
-}
-
-static int
-decode_3d_965(uint32_t *data, int count, uint32_t hw_offset, int *failures)
-{
- unsigned int opcode, len;
-
- struct {
- uint32_t opcode;
- int min_len;
- int max_len;
- char *name;
- } opcodes_3d[] = {
- { 0x6000, 3, 3, "URB_FENCE" },
- { 0x6001, 2, 2, "CS_URB_STATE" },
- { 0x6002, 2, 2, "CONSTANT_BUFFER" },
- { 0x6101, 6, 6, "STATE_BASE_ADDRESS" },
-	{ 0x6102, 2, 2, "STATE_SIP" },
- { 0x6104, 1, 1, "3DSTATE_PIPELINE_SELECT" },
- { 0x680b, 1, 1, "3DSTATE_VF_STATISTICS" },
- { 0x6904, 1, 1, "3DSTATE_PIPELINE_SELECT" },
- { 0x7800, 7, 7, "3DSTATE_PIPELINED_POINTERS" },
- { 0x7801, 6, 6, "3DSTATE_BINDING_TABLE_POINTERS" },
- { 0x780b, 1, 1, "3DSTATE_VF_STATISTICS" },
- { 0x7808, 5, 257, "3DSTATE_VERTEX_BUFFERS" },
- { 0x7809, 3, 256, "3DSTATE_VERTEX_ELEMENTS" },
- /* 0x7808: 3DSTATE_VERTEX_BUFFERS */
- /* 0x7809: 3DSTATE_VERTEX_ELEMENTS */
- { 0x7900, 4, 4, "3DSTATE_DRAWING_RECTANGLE" },
- { 0x7901, 5, 5, "3DSTATE_CONSTANT_COLOR" },
- { 0x7905, 5, 7, "3DSTATE_DEPTH_BUFFER" },
- { 0x7906, 2, 2, "3DSTATE_POLY_STIPPLE_OFFSET" },
- { 0x7907, 33, 33, "3DSTATE_POLY_STIPPLE_PATTERN" },
- { 0x7908, 3, 3, "3DSTATE_LINE_STIPPLE" },
- { 0x7909, 2, 2, "3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP" },
- { 0x790a, 3, 3, "3DSTATE_AA_LINE_PARAMETERS" },
- { 0x7b00, 6, 6, "3DPRIMITIVE" },
- };
-
- len = (data[0] & 0x0000ffff) + 2;
-
- switch ((data[0] & 0xffff0000) >> 16) {
- case 0x6101:
- if (len != 6)
- DRM_ERROR("Bad count in STATE_BASE_ADDRESS\n");
- if (count < 6)
- BUFFER_FAIL(count, len, "STATE_BASE_ADDRESS");
-
- instr_out(data, hw_offset, 0,
- "STATE_BASE_ADDRESS\n");
-
- if (data[1] & 1) {
- instr_out(data, hw_offset, 1, "General state at 0x%08x\n",
- data[1] & ~1);
- } else
- instr_out(data, hw_offset, 1, "General state not updated\n");
-
- if (data[2] & 1) {
- instr_out(data, hw_offset, 2, "Surface state at 0x%08x\n",
- data[2] & ~1);
- } else
- instr_out(data, hw_offset, 2, "Surface state not updated\n");
-
- if (data[3] & 1) {
- instr_out(data, hw_offset, 3, "Indirect state at 0x%08x\n",
- data[3] & ~1);
- } else
- instr_out(data, hw_offset, 3, "Indirect state not updated\n");
-
- if (data[4] & 1) {
- instr_out(data, hw_offset, 4, "General state upper bound 0x%08x\n",
- data[4] & ~1);
- } else
- instr_out(data, hw_offset, 4, "General state not updated\n");
-
- if (data[5] & 1) {
- instr_out(data, hw_offset, 5, "Indirect state upper bound 0x%08x\n",
- data[5] & ~1);
- } else
- instr_out(data, hw_offset, 5, "Indirect state not updated\n");
-
- return len;
- case 0x7800:
- if (len != 7)
- DRM_ERROR("Bad count in 3DSTATE_PIPELINED_POINTERS\n");
- if (count < 7)
- BUFFER_FAIL(count, len, "3DSTATE_PIPELINED_POINTERS");
-
- instr_out(data, hw_offset, 0,
- "3DSTATE_PIPELINED_POINTERS\n");
- instr_out(data, hw_offset, 1, "VS state\n");
- instr_out(data, hw_offset, 2, "GS state\n");
- instr_out(data, hw_offset, 3, "Clip state\n");
- instr_out(data, hw_offset, 4, "SF state\n");
- instr_out(data, hw_offset, 5, "WM state\n");
- instr_out(data, hw_offset, 6, "CC state\n");
- return len;
- case 0x7801:
- if (len != 6)
- DRM_ERROR("Bad count in 3DSTATE_BINDING_TABLE_POINTERS\n");
- if (count < 6)
- BUFFER_FAIL(count, len, "3DSTATE_BINDING_TABLE_POINTERS");
-
- instr_out(data, hw_offset, 0,
- "3DSTATE_BINDING_TABLE_POINTERS\n");
- instr_out(data, hw_offset, 1, "VS binding table\n");
- instr_out(data, hw_offset, 2, "GS binding table\n");
- instr_out(data, hw_offset, 3, "Clip binding table\n");
- instr_out(data, hw_offset, 4, "SF binding table\n");
- instr_out(data, hw_offset, 5, "WM binding table\n");
-
- return len;
-
- case 0x7900:
- if (len != 4)
- DRM_ERROR("Bad count in 3DSTATE_DRAWING_RECTANGLE\n");
- if (count < 4)
- BUFFER_FAIL(count, len, "3DSTATE_DRAWING_RECTANGLE");
-
- instr_out(data, hw_offset, 0,
- "3DSTATE_DRAWING_RECTANGLE\n");
- instr_out(data, hw_offset, 1, "top left: %d,%d\n",
- data[1] & 0xffff,
- (data[1] >> 16) & 0xffff);
- instr_out(data, hw_offset, 2, "bottom right: %d,%d\n",
- data[2] & 0xffff,
- (data[2] >> 16) & 0xffff);
- instr_out(data, hw_offset, 3, "origin: %d,%d\n",
- (int)data[3] & 0xffff,
- ((int)data[3] >> 16) & 0xffff);
-
- return len;
-
- case 0x7905:
- if (len != 5)
- DRM_ERROR("Bad count in 3DSTATE_DEPTH_BUFFER\n");
- if (count < 5)
- BUFFER_FAIL(count, len, "3DSTATE_DEPTH_BUFFER");
-
- instr_out(data, hw_offset, 0,
- "3DSTATE_DEPTH_BUFFER\n");
- instr_out(data, hw_offset, 1, "%s, %s, pitch = %d bytes, %stiled\n",
- get_965_surfacetype(data[1] >> 29),
- get_965_depthformat((data[1] >> 18) & 0x7),
- (data[1] & 0x0001ffff) + 1,
- data[1] & (1 << 27) ? "" : "not ");
- instr_out(data, hw_offset, 2, "depth offset\n");
- instr_out(data, hw_offset, 3, "%dx%d\n",
- ((data[3] & 0x0007ffc0) >> 6) + 1,
- ((data[3] & 0xfff80000) >> 19) + 1);
- instr_out(data, hw_offset, 4, "volume depth\n");
-
- return len;
- }
-
- for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
- opcode++) {
- if ((data[0] & 0xffff0000) >> 16 == opcodes_3d[opcode].opcode) {
- unsigned int i;
- len = 1;
-
- instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
- if (opcodes_3d[opcode].max_len > 1) {
- len = (data[0] & 0xff) + 2;
- if (len < opcodes_3d[opcode].min_len ||
- len > opcodes_3d[opcode].max_len)
- {
- DRM_ERROR("Bad count in %s\n", opcodes_3d[opcode].name);
- }
- }
-
- for (i = 1; i < len; i++) {
- if (i >= count)
- BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
- instr_out(data, hw_offset, i, "dword %d\n", i);
- }
- return len;
- }
- }
-
- instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
- (*failures)++;
- return 1;
-}
-
-
-static int
-decode_3d_i830(uint32_t *data, int count, uint32_t hw_offset, int *failures)
-{
- unsigned int opcode;
-
- struct {
- uint32_t opcode;
- int min_len;
- int max_len;
- char *name;
- } opcodes_3d[] = {
- { 0x02, 1, 1, "3DSTATE_MODES_3" },
- { 0x03, 1, 1, "3DSTATE_ENABLES_1"},
- { 0x04, 1, 1, "3DSTATE_ENABLES_2"},
- { 0x05, 1, 1, "3DSTATE_VFT0"},
- { 0x06, 1, 1, "3DSTATE_AA"},
- { 0x07, 1, 1, "3DSTATE_RASTERIZATION_RULES" },
- { 0x08, 1, 1, "3DSTATE_MODES_1" },
- { 0x09, 1, 1, "3DSTATE_STENCIL_TEST" },
- { 0x0a, 1, 1, "3DSTATE_VFT1"},
- { 0x0b, 1, 1, "3DSTATE_INDPT_ALPHA_BLEND" },
- { 0x0c, 1, 1, "3DSTATE_MODES_5" },
- { 0x0d, 1, 1, "3DSTATE_MAP_BLEND_OP" },
- { 0x0e, 1, 1, "3DSTATE_MAP_BLEND_ARG" },
- { 0x0f, 1, 1, "3DSTATE_MODES_2" },
- { 0x15, 1, 1, "3DSTATE_FOG_COLOR" },
- { 0x16, 1, 1, "3DSTATE_MODES_4" },
- };
-
- switch ((data[0] & 0x1f000000) >> 24) {
- case 0x1f:
- return decode_3d_primitive(data, count, hw_offset, failures);
- case 0x1d:
- return decode_3d_1d(data, count, hw_offset, failures, 1);
- case 0x1c:
- return decode_3d_1c(data, count, hw_offset, failures);
- }
-
- for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
- opcode++) {
- if ((data[0] & 0x1f000000) >> 24 == opcodes_3d[opcode].opcode) {
- unsigned int len = 1, i;
-
- instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
- if (opcodes_3d[opcode].max_len > 1) {
- len = (data[0] & 0xff) + 2;
- if (len < opcodes_3d[opcode].min_len ||
- len > opcodes_3d[opcode].max_len)
- {
- DRM_ERROR("Bad count in %s\n", opcodes_3d[opcode].name);
- }
- }
-
- for (i = 1; i < len; i++) {
- if (i >= count)
- BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
- instr_out(data, hw_offset, i, "dword %d\n", i);
- }
- return len;
- }
- }
-
- instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
- (*failures)++;
- return 1;
-}
-
-void i915_gem_command_decode(uint32_t *data, int count, uint32_t hw_offset, struct drm_device *dev)
-{
- int index = 0;
- int failures = 0;
-
- while (index < count) {
- switch ((data[index] & 0xe0000000) >> 29) {
- case 0x0:
- index += decode_mi(data + index, count - index,
- hw_offset + index * 4, &failures);
- break;
- case 0x2:
- index += decode_2d(data + index, count - index,
- hw_offset + index * 4, &failures);
- break;
- case 0x3:
- if (IS_I965G(dev)) {
- index += decode_3d_965(data + index, count - index,
- hw_offset + index * 4, &failures);
- } else if (IS_I9XX(dev)) {
- index += decode_3d(data + index, count - index,
- hw_offset + index * 4, &failures);
- } else {
- index += decode_3d_i830(data + index, count - index,
- hw_offset + index * 4, &failures);
- }
- break;
- default:
- instr_out(data, hw_offset, index, "UNKNOWN\n");
- failures++;
- index++;
- break;
- }
- }
-}
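
A minimal way to exercise the decoder from a debug path, assuming a
captured batch in a local buffer (the buffer contents and the dev
pointer here are placeholders, not from this file): the top three bits
of each dword route it to the MI (0x0), 2D (0x2), or 3D (0x3) decoder,
and anything else is flagged UNKNOWN.

	/* Hypothetical call site: a two-dword batch, MI_NOOP followed
	 * by MI_BATCH_BUFFER_END. hw_offset only labels the output. */
	uint32_t buf[] = { 0x00000000, 0x05000000 };
	i915_gem_command_decode(buf, 2, 0x0, dev);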
-
diff --git a/usr/src/uts/intel/io/drm/i915_gem_tiling.c b/usr/src/uts/intel/io/drm/i915_gem_tiling.c
deleted file mode 100644
index 8681fb6..0000000
--- a/usr/src/uts/intel/io/drm/i915_gem_tiling.c
+++ /dev/null
@@ -1,390 +0,0 @@
-/* BEGIN CSTYLED */
-
-/*
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <sys/sysmacros.h>
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-/** @file i915_gem_tiling.c
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses are in the same cacheline.
- * Performance improvement from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled. However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y. So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip -- Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU also sometimes XORs in higher bits as well, to improve
- * bandwidth doing strided access like we do so frequently in graphics. This
- * is called "Channel XOR Randomization" in the MCH documentation. The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
- *
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
- *
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
- *
- * When bit 17 is XORed in, we simply refuse to tile at all. Bit
- * 17 is not just a page offset, so as we page an object out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
- *
- * Otherwise, if interleaved, we have to tell the 3d driver what the address
- * swizzling it needs to do is, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
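
To make the comment above concrete, here is a hedged sketch of what
bit 6 swizzling means for a CPU access under the 9/10 mode: the GPU's
effective bit 6 is the CPU address's bit 6 XORed with bits 9 and 10,
so software writing through the CPU must pre-swizzle its offsets. The
helper is illustrative; this file only detects and reports the mode.

	#include <stdint.h>

	/*
	 * Map a linear object offset to the CPU offset that lands on
	 * the same 64-byte cacheline the GPU will use under 9/10
	 * swizzling: flip bit 6 whenever bit 9 XOR bit 10 is set.
	 */
	static uint64_t
	swizzle_bit_6_9_10(uint64_t offset)
	{
		uint64_t bit9 = (offset >> 9) & 1;
		uint64_t bit10 = (offset >> 10) & 1;

		return (offset ^ ((bit9 ^ bit10) << 6));
	}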
-
-/**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
- */
-void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-
- if (!IS_I9XX(dev)) {
- /* As far as we know, the 865 doesn't have these bit 6
- * swizzling issues.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev)) {
- uint32_t dcc;
-
- /* On mobile 9xx chipsets, channel interleave by the CPU is
- * determined by DCC. For single-channel, neither the CPU
- * nor the GPU do swizzling. For dual channel interleaved,
- * the GPU's interleave is bit 9 and 10 for X tiled, and bit
- * 9 for Y tiled. The CPU's interleave is independent, and
- * can be based on either bit 11 (haven't seen this yet) or
- * bit 17 (common).
- */
-
- dcc = I915_READ(DCC);
- switch (dcc & DCC_ADDRESSING_MODE_MASK) {
- case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- break;
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
- if (dcc & DCC_CHANNEL_XOR_DISABLE) {
- /* This is the base swizzling by the GPU for
- * tiled buffers.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
- /* Bit 11 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
- swizzle_y = I915_BIT_6_SWIZZLE_9_11;
- } else {
- /* Bit 17 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
- break;
- }
- if (dcc == 0xffffffff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
- "Disabling tiling.\n");
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
- } else {
- /* The 965, G33, and newer, have a very flexible memory
- * configuration. It will enable dual-channel mode
- * (interleaving) on as much memory as it can, and the GPU
- * will additionally sometimes enable different bit 6
- * swizzling for tiled objects from the CPU.
- *
- * Here's what I found on the G965:
- * slot fill memory size swizzling
- * 0A 0B 1A 1B 1-ch 2-ch
- * 512 0 0 0 512 0 O
- * 512 0 512 0 16 1008 X
- * 512 0 0 512 16 1008 X
- * 0 512 0 512 16 1008 X
- * 1024 1024 1024 0 2048 1024 O
- *
- * We could probably detect this based on either the DRB
- * matching, which was the case for the swizzling required in
- * the table above, or from the 1-ch value being less than
- * the minimum size of a rank.
- */
- if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
- }
-
- /* FIXME: check with memory config on IGDNG */
- if (IS_IGDNG(dev)) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
-
- dev_priv->mm.bit_6_swizzle_x = swizzle_x;
- dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
-
-
-/**
- * Returns the size of the fence for a tiled object of the given size.
- */
-static int
-i915_get_fence_size(struct drm_device *dev, int size)
-{
- int i;
- int start;
-
- if (IS_I965G(dev)) {
- /* The 965 can have fences at any page boundary. */
-
- return (size + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
- } else {
- /* Align the size to a power of two greater than the smallest
- * fence size.
- */
- if (IS_I9XX(dev))
- start = 1024 * 1024;
- else
- start = 512 * 1024;
-
- for (i = start; i < size; i <<= 1)
- ;
-
- return i;
- }
-}
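
A worked example of the pre-965 rounding above (the sizes are
hypothetical, not from this file): on a 9xx chip a 700 KB tiled object
gets a 1 MB fence, since candidates start at 1 MB and double until the
object fits; on an 8xx chip the same object starts from 512 KB and
also rounds up to 1 MB. On 965 it would only round up to a page.

	int fence = 1024 * 1024;	/* 9xx minimum fence size */
	int size = 700 * 1024;		/* hypothetical object size */

	while (fence < size)
		fence <<= 1;
	/* fence == 1 MB: 700 KB already fits in the first candidate. */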
-
-/* Check pitch constraints for all chips & tiling formats */
-static int
-i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
-{
- int tile_width;
-
- /* Linear is always fine */
- if (tiling_mode == I915_TILING_NONE)
- return 1;
-
- if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_width = 128;
- else
- tile_width = 512;
-
- if (stride == 0)
- return 0;
-
- /* 965+ just needs multiples of tile width */
- if (IS_I965G(dev)) {
- if (stride & (tile_width - 1))
- return 0;
- return 1;
- }
-
- /* Pre-965 needs power of two tile widths */
- if (stride < tile_width)
- return 0;
-
- if (!ISP2(stride))
- return 0;
-
- /* We don't handle the aperture area covered by the fence being bigger
- * than the object size.
- */
- if (i915_get_fence_size(dev, size) != size)
- return 0;
-
- return 1;
-}
-
-/**
- * Sets the tiling mode of an object, returning the required swizzling of
- * bit 6 of addresses in the object.
- */
-/*ARGSUSED*/
-int
-i915_gem_set_tiling(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_set_tiling args;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_set_tiling __user *) data, sizeof(args));
-
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL)
- return EINVAL;
- obj_priv = obj->driver_private;
-
- if (!i915_tiling_ok(dev, args.stride, obj->size, args.tiling_mode)) {
- drm_gem_object_unreference(obj);
- DRM_DEBUG("i915 tiling is not OK");
- return EINVAL;
- }
-
- spin_lock(&dev->struct_mutex);
-
- if (args.tiling_mode == I915_TILING_NONE) {
- args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- } else {
- if (args.tiling_mode == I915_TILING_X)
- args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
- else
- args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
- /* If we can't handle the swizzling, make it untiled. */
- if (args.swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
- args.tiling_mode = I915_TILING_NONE;
- args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- }
- }
-
- if (args.tiling_mode != obj_priv->tiling_mode) {
- int ret;
-
- /* Unbind the object, as switching tiling means we're
- * switching the cache organization due to fencing, probably.
- */
- ret = i915_gem_object_unbind(obj, 1);
- if (ret != 0) {
- args.tiling_mode = obj_priv->tiling_mode;
- spin_unlock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
-			DRM_ERROR("tiling switch: unbind error %d", ret);
- return ret;
- }
- obj_priv->tiling_mode = args.tiling_mode;
- }
- obj_priv->stride = args.stride;
-
- ret = DRM_COPY_TO_USER((struct drm_i915_gem_set_tiling __user *) data, &args, sizeof(args));
-	if (ret != 0)
-		DRM_ERROR("gem set tiling error: %d", ret);
-
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Returns the current tiling mode and required bit 6 swizzling for the object.
- */
-/*ARGSUSED*/
-int
-i915_gem_get_tiling(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- struct drm_i915_gem_get_tiling args;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- if (dev->driver->use_gem != 1)
- return ENODEV;
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (struct drm_i915_gem_get_tiling __user *) data, sizeof(args));
-
- obj = drm_gem_object_lookup(fpriv, args.handle);
- if (obj == NULL)
- return EINVAL;
- obj_priv = obj->driver_private;
-
- spin_lock(&dev->struct_mutex);
-
- args.tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
- case I915_TILING_X:
- args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
- break;
- case I915_TILING_Y:
- args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
- break;
- case I915_TILING_NONE:
- args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- break;
- default:
- DRM_ERROR("unknown tiling mode\n");
- }
-
- ret = DRM_COPY_TO_USER((struct drm_i915_gem_get_tiling __user *) data, &args, sizeof(args));
-	if (ret != 0)
-		DRM_ERROR("gem get tiling error: %d", ret);
-
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
-
- return 0;
-}
diff --git a/usr/src/uts/intel/io/drm/i915_irq.c b/usr/src/uts/intel/io/drm/i915_irq.c
deleted file mode 100644
index d4023b5..0000000
--- a/usr/src/uts/intel/io/drm/i915_irq.c
+++ /dev/null
@@ -1,1052 +0,0 @@
-/* BEGIN CSTYLED */
-
-/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-
-#define MAX_NOPID ((u32)~0)
-
-/**
- * Interrupts that are always left unmasked.
- *
- * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
- * we leave them always unmasked in IMR and then control enabling them through
- * PIPESTAT alone.
- */
-
-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-
-/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
-
-/** These are all of the interrupts used by the driver */
-#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
- I915_INTERRUPT_ENABLE_VAR)
-
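In practice the split described above works like this (a hedged sketch
using i915_enable_pipestat() from later in this file): the pipe A/B
event bits stay permanently unmasked in IMR, and an individual event
such as vblank is turned on only through its PIPESTAT enable bit.

	/*
	 * Hypothetical vblank-on path: IMR already passes pipe events
	 * (I915_INTERRUPT_ENABLE_FIX), so only the PIPESTAT enable bit
	 * changes. PIPE_VBLANK_INTERRUPT_ENABLE is the usual name for
	 * that bit in i915 register headers.
	 */
	i915_enable_pipestat(dev_priv, 0 /* pipe A */,
	    PIPE_VBLANK_INTERRUPT_ENABLE);
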
-void
-igdng_enable_irq(drm_i915_private_t *dev_priv, u32 mask, int gfx_irq)
-{
- if (gfx_irq && ((dev_priv->gt_irq_mask_reg & mask) != 0)) {
- dev_priv->gt_irq_mask_reg &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
- } else if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
-
- }
-}
-
-static inline void
-igdng_disable_irq(drm_i915_private_t *dev_priv, u32 mask, int gfx_irq)
-{
- if (gfx_irq && ((dev_priv->gt_irq_mask_reg & mask) != mask)) {
- dev_priv->gt_irq_mask_reg |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
- } else if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
- }
-}
-
-/* For display hotplug interrupt */
-void
-igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
- }
-}
-
-#if 0
-static inline void
-igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
- }
-}
-#endif
-
-static inline void
-i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
- }
-}
-
-static inline void
-i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
-{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
- }
-}
-
-static inline uint32_t
-i915_pipestat(int pipe)
-{
- if (pipe == 0)
- return PIPEASTAT;
- if (pipe == 1)
- return PIPEBSTAT;
- return 0;
-}
-
-void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, uint32_t mask)
-{
- if ((dev_priv->pipestat[pipe] & mask) != mask) {
- u32 reg = i915_pipestat(pipe);
-
- dev_priv->pipestat[pipe] |= mask;
- /* Enable the interrupt, clear any pending status */
- I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
- (void) I915_READ(reg);
- }
-}
-
-void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
-{
- if ((dev_priv->pipestat[pipe] & mask) != 0) {
- u32 reg = i915_pipestat(pipe);
-
- dev_priv->pipestat[pipe] &= ~mask;
- I915_WRITE(reg, dev_priv->pipestat[pipe]);
- (void) I915_READ(reg);
- }
-}
-
-/**
- * i915_pipe_enabled - check if a pipe is enabled
- * @dev: DRM device
- * @pipe: pipe to check
- *
- * Reading certain registers when the pipe is disabled can hang the chip.
- * Use this routine to make sure the PLL is running and the pipe is active
- * before reading such registers if unsure.
- */
-static int
-i915_pipe_enabled(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-
- if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
- return 1;
-
- return 0;
-}
-
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long high_frame;
- unsigned long low_frame;
- u32 high1, high2, low, count;
-
- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
-
- if (!i915_pipe_enabled(dev, pipe)) {
- DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
- return 0;
- }
-
- /*
- * High & low register fields aren't synchronized, so make sure
- * we get a low value that's stable across two reads of the high
- * register.
- */
- do {
- high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
- low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
- PIPE_FRAME_LOW_SHIFT);
- high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
- PIPE_FRAME_HIGH_SHIFT);
- } while (high1 != high2);
-
- count = (high1 << 8) | low;
-
- return count;
-}
-
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error. Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-static void i915_capture_error_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
-
- spin_lock_irqsave(&dev_priv->error_lock, flags);
-#if 0
- if (dev_priv->first_error)
- goto out;
-#endif
- error = drm_alloc(sizeof(*error), DRM_MEM_DRIVER);
- if (!error) {
-		DRM_DEBUG("out of memory, not capturing error state\n");
- goto out;
- }
-
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
- error->pipeastat = I915_READ(PIPEASTAT);
- error->pipebstat = I915_READ(PIPEBSTAT);
- error->instpm = I915_READ(INSTPM);
- if (!IS_I965G(dev)) {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- } else {
- error->ipeir = I915_READ(IPEIR_I965);
- error->ipehr = I915_READ(IPEHR_I965);
- error->instdone = I915_READ(INSTDONE_I965);
- error->instps = I915_READ(INSTPS);
- error->instdone1 = I915_READ(INSTDONE1);
- error->acthd = I915_READ(ACTHD_I965);
- }
-
- (void) uniqtime(&error->time);
-
- dev_priv->first_error = error;
-
- DRM_DEBUG("Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
- DRM_DEBUG("EIR: 0x%08x\n", error->eir);
- DRM_DEBUG(" PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- DRM_DEBUG(" INSTPM: 0x%08x\n", error->instpm);
- DRM_DEBUG(" IPEIR: 0x%08x\n", error->ipeir);
- DRM_DEBUG(" IPEHR: 0x%08x\n", error->ipehr);
- DRM_DEBUG(" INSTDONE: 0x%08x\n", error->instdone);
- DRM_DEBUG(" ACTHD: 0x%08x\n", error->acthd);
- DRM_DEBUG(" DMA_FADD_P: 0x%08x\n", I915_READ(0x2078));
- if (IS_I965G(dev)) {
- DRM_DEBUG(" INSTPS: 0x%08x\n", error->instps);
- DRM_DEBUG(" INSTDONE1: 0x%08x\n", error->instdone1);
- }
- drm_free(error, sizeof(*error), DRM_MEM_DRIVER);
-out:
- spin_unlock_irqrestore(&dev_priv->error_lock, flags);
-}
-
-/**
- * i915_handle_error - handle an error interrupt
- * @dev: drm device
- *
- * Do some basic checking of register state at error interrupt time and
- * dump it to the syslog. Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs. Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-void i915_handle_error(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 eir = I915_READ(EIR);
- u32 pipea_stats = I915_READ(PIPEASTAT);
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
-
- i915_capture_error_state(dev);
-
- DRM_DEBUG("render error detected, EIR: 0x%08x\n",
- eir);
-
- if (IS_G4X(dev)) {
- if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- DRM_DEBUG(" IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- DRM_DEBUG(" IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- DRM_DEBUG(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
- DRM_DEBUG(" INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- DRM_DEBUG(" INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- DRM_DEBUG(" ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
- }
- if (eir & GM45_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- DRM_DEBUG("page table error\n");
- DRM_DEBUG(" PGTBL_ER: 0x%08x\n",
- pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
- }
- }
-
- if (IS_I9XX(dev)) {
- if (eir & I915_ERROR_PAGE_TABLE) {
- u32 pgtbl_err = I915_READ(PGTBL_ER);
- DRM_DEBUG("page table error\n");
- DRM_DEBUG("PGTBL_ER: 0x%08x\n",
- pgtbl_err);
- I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
- }
- }
-
- if (eir & I915_ERROR_MEMORY_REFRESH) {
- DRM_DEBUG("memory refresh error\n");
- DRM_DEBUG("PIPEASTAT: 0x%08x\n",
- pipea_stats);
- DRM_DEBUG("PIPEBSTAT: 0x%08x\n",
- pipeb_stats);
- /* pipestat has already been acked */
- }
- if (eir & I915_ERROR_INSTRUCTION) {
- DRM_DEBUG("instruction error\n");
- DRM_DEBUG(" INSTPM: 0x%08x\n",
- I915_READ(INSTPM));
- if (!IS_I965G(dev)) {
- u32 ipeir = I915_READ(IPEIR);
-
- DRM_DEBUG(" IPEIR: 0x%08x\n",
- I915_READ(IPEIR));
- DRM_DEBUG(" IPEHR: 0x%08x\n",
- I915_READ(IPEHR));
- DRM_DEBUG(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE));
- DRM_DEBUG(" ACTHD: 0x%08x\n",
- I915_READ(ACTHD));
- I915_WRITE(IPEIR, ipeir);
- (void)I915_READ(IPEIR);
- } else {
- u32 ipeir = I915_READ(IPEIR_I965);
-
- DRM_DEBUG(" IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- DRM_DEBUG(" IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- DRM_DEBUG(" INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE_I965));
- DRM_DEBUG(" INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- DRM_DEBUG(" INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- DRM_DEBUG(" ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
- I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
- }
- }
-
- I915_WRITE(EIR, eir);
- (void)I915_READ(EIR);
- eir = I915_READ(EIR);
- if (eir) {
- /*
- * some errors might have become stuck,
- * mask them.
- */
- DRM_DEBUG("EIR stuck: 0x%08x, masking\n", eir);
- I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
- }
-
-}
-
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
-
- if (!i915_pipe_enabled(dev, pipe)) {
- DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
- return 0;
- }
-
- return I915_READ(reg);
-}
-
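-/*
- * IGDNG (Ironlake) interrupt handler: disable the master interrupt
- * control, then drain the display (DEIIR) and render (GTIIR) status
- * registers until both read back clear.
- */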
-irqreturn_t igdng_irq_handler(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier;
- u32 new_de_iir, new_gt_iir;
- int vblank = 0;
-
- /* disable master interrupt before clearing iir */
- de_ier = I915_READ(DEIER);
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- (void)I915_READ(DEIER);
-
- de_iir = I915_READ(DEIIR);
- gt_iir = I915_READ(GTIIR);
-
- for (;;) {
- if (de_iir == 0 && gt_iir == 0)
- break;
-
- ret = IRQ_HANDLED;
-
- I915_WRITE(DEIIR, de_iir);
- new_de_iir = I915_READ(DEIIR);
- I915_WRITE(GTIIR, gt_iir);
- new_gt_iir = I915_READ(GTIIR);
-
-		if (dev_priv->sarea_priv) {
-			dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-		}
-
- if (gt_iir & GT_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
- DRM_WAKEUP(&dev_priv->irq_queue);
- }
- if (de_iir & DE_PIPEA_VBLANK) {
- vblank++;
- drm_handle_vblank(dev, 0);
- }
-
- if (de_iir & DE_PIPEB_VBLANK) {
- vblank++;
- drm_handle_vblank(dev, 1);
- }
-
- de_iir = new_de_iir;
- gt_iir = new_gt_iir;
- }
-
- I915_WRITE(DEIER, de_ier);
- (void)I915_READ(DEIER);
-
- return ret;
-}
-
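-/*
- * Top-level interrupt handler. IGDNG chips take a separate path;
- * otherwise ack IIR and service user, render-error and pipe events.
- */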
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
-{
- drm_device_t *dev = (drm_device_t *) (void *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 iir;
- u32 pipea_stats = 0, pipeb_stats = 0;
- int vblank = 0;
-
- if (IS_IGDNG(dev))
- return igdng_irq_handler(dev);
-
- iir = I915_READ(IIR);
-
- if (iir == 0) {
- return IRQ_NONE;
- }
-start:
-
- if (dev_priv->sarea_priv) {
- if (dev_priv->hw_status_page)
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- }
-
- I915_WRITE(IIR, iir);
-
- (void) I915_READ(IIR); /* Flush posted writes */
-
-
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev);
-
- if (iir & I915_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
- DRM_WAKEUP(&dev_priv->irq_queue);
- }
-
- if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
- pipea_stats = I915_READ(PIPEASTAT);
-
- /* The vblank interrupt gets enabled even if we didn't ask for
- it, so make sure it's shut down again */
- if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
- pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
- PIPE_VBLANK_INTERRUPT_ENABLE);
- else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
- PIPE_VBLANK_INTERRUPT_STATUS))
- {
- vblank++;
- drm_handle_vblank(dev, 0);
- }
-
- I915_WRITE(PIPEASTAT, pipea_stats);
- }
- if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
- pipeb_stats = I915_READ(PIPEBSTAT);
-
- /* The vblank interrupt gets enabled even if we didn't ask for
- it, so make sure it's shut down again */
- if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
- pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
- PIPE_VBLANK_INTERRUPT_ENABLE);
- else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
- PIPE_VBLANK_INTERRUPT_STATUS))
- {
- vblank++;
- drm_handle_vblank(dev, 1);
- }
-
- I915_WRITE(PIPEBSTAT, pipeb_stats);
- }
- return IRQ_HANDLED;
-
-}
-
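-/*
- * Emit a breadcrumb store and MI_USER_INTERRUPT into the ring and
- * return the new sequence number for callers to wait on.
- */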
-int i915_emit_irq(drm_device_t * dev)
-{
-
- drm_i915_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- i915_kernel_lost_context(dev);
-
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
-#if defined(__i386)
- if (IS_GM45(dev)) {
- BEGIN_LP_RING(3);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- ADVANCE_LP_RING();
-
- (void) READ_BREADCRUMB(dev_priv);
- BEGIN_LP_RING(2);
- OUT_RING(0);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- } else {
-#endif /* __i386 */
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
-#if defined(__i386)
- }
-#endif /* __i386 */
-
-#if defined(__i386)
- if (IS_I965GM(dev) || IS_IGDNG(dev) || IS_GM45(dev))
-#else
- if (IS_I965GM(dev) || IS_IGDNG(dev))
-#endif /* __i386 */
- {
- (void) READ_BREADCRUMB(dev_priv);
- BEGIN_LP_RING(2);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
- (void) READ_BREADCRUMB(dev_priv);
- }
-
- return dev_priv->counter;
-}
-
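-/*
- * Reference-counted enable/disable of the user interrupt; only the
- * first enable and the last disable touch the hardware mask.
- */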
-void i915_user_irq_on(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- spin_lock(&dev_priv->user_irq_lock);
- if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
- if (IS_IGDNG(dev))
- igdng_enable_irq(dev_priv, GT_USER_INTERRUPT, 1);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock(&dev_priv->user_irq_lock);
-
-}
-
-void i915_user_irq_off(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- spin_lock(&dev_priv->user_irq_lock);
- if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (IS_IGDNG(dev))
- igdng_disable_irq(dev_priv, GT_USER_INTERRUPT, 1);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
- }
- spin_unlock(&dev_priv->user_irq_lock);
-}
-
-
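-/*
- * Sleep until the breadcrumb reaches irq_nr, retrying a bounded
- * number of times when the wait returns EBUSY or EINTR.
- */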
-static int i915_wait_irq(drm_device_t * dev, int irq_nr)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = 0;
- int wait_time = 0;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
-waitmore:
- wait_time++;
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (dev_priv->sarea_priv) {
- dev_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
- return 0;
- }
- DRM_DEBUG("i915_wait_irq: irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv));
- i915_user_irq_on(dev);
- DRM_WAIT_ON(ret, &dev_priv->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- i915_user_irq_off(dev);
-
- if (ret == EBUSY) {
- if (wait_time > 5) {
- DRM_DEBUG("%d: EBUSY -- rec: %d emitted: %d\n",
- ret,
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
- return ret;
- }
- goto waitmore;
- }
-
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
- if (ret == EINTR) {
- if (wait_time > 5) {
-			DRM_DEBUG("EINTR wait %d now %d\n", dev_priv->counter, READ_BREADCRUMB(dev_priv));
- return ret;
- }
- goto waitmore;
- }
-
- return ret;
-}
-
-
-/* Needs the lock as it touches the ring.
- */
-/*ARGSUSED*/
-int i915_irq_emit(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t emit;
- int result;
-
- LOCK_TEST_WITH_RETURN(dev, fpriv);
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
-
-
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_i915_irq_emit32_t irq_emit32;
-
- DRM_COPYFROM_WITH_RETURN(&irq_emit32,
- (drm_i915_irq_emit32_t __user *) data,
- sizeof (drm_i915_irq_emit32_t));
- emit.irq_seq = (int __user *)(uintptr_t)irq_emit32.irq_seq;
- } else
- DRM_COPYFROM_WITH_RETURN(&emit,
- (drm_i915_irq_emit_t __user *) data, sizeof(emit));
-
- spin_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- spin_unlock(&dev->struct_mutex);
-
- if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return (EFAULT);
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-/*ARGSUSED*/
-int i915_irq_wait(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t irqwait;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
-
- DRM_COPYFROM_WITH_RETURN(&irqwait,
- (drm_i915_irq_wait_t __user *) data, sizeof(irqwait));
-
- return i915_wait_irq(dev, irqwait.irq_seq);
-}
-
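-/* Enable the display-engine vblank interrupt for a pipe on IGDNG. */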
-static void igdng_enable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 vblank;
-
- if (pipe == 0)
- vblank = DE_PIPEA_VBLANK;
- else
- vblank = DE_PIPEB_VBLANK;
-
- if ((dev_priv->de_irq_enable_reg & vblank) == 0) {
- igdng_enable_irq(dev_priv, vblank, 0);
- dev_priv->de_irq_enable_reg |= vblank;
- I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- (void) I915_READ(DEIER);
- }
-}
-
-static void igdng_disable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 vblank;
-
- if (pipe == 0)
- vblank = DE_PIPEA_VBLANK;
- else
- vblank = DE_PIPEB_VBLANK;
-
- if ((dev_priv->de_irq_enable_reg & vblank) != 0) {
- igdng_disable_irq(dev_priv, vblank, 0);
- dev_priv->de_irq_enable_reg &= ~vblank;
- I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- (void) I915_READ(DEIER);
- }
-}
-
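-/*
- * Per-generation vblank enable: display-engine bits on IGDNG,
- * start-of-vblank pipestat on 965-class, legacy vblank pipestat
- * otherwise. Fails with EINVAL if the pipe is not running.
- */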
-int i915_enable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- u32 pipeconf;
-
- pipeconf = I915_READ(pipeconf_reg);
- if (!(pipeconf & PIPEACONF_ENABLE))
- return -EINVAL;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_IGDNG(dev))
- igdng_enable_vblank(dev, pipe);
- else if (IS_I965G(dev))
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
- else
- i915_enable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-
- return 0;
-}
-
-void i915_disable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_IGDNG(dev))
- igdng_disable_vblank(dev, pipe);
- else
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE |
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-/* Set the vblank monitor pipe
- */
-/*ARGSUSED*/
-int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return (-EINVAL);
- }
-
- return (0);
-}
-
-/*ARGSUSED*/
-int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t pipe;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- DRM_COPYFROM_WITH_RETURN(&pipe, (drm_i915_vblank_pipe_t __user *)data, sizeof (pipe));
-
-	pipe.pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
-	/* copy the result back out, or the caller never sees it */
-	if (DRM_COPY_TO_USER((drm_i915_vblank_pipe_t __user *)data, &pipe, sizeof (pipe))) {
-		DRM_ERROR("copy_to_user\n");
-		return (EFAULT);
-	}
-
-	return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-/*ARGSUSED*/
-int i915_vblank_swap(DRM_IOCTL_ARGS)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
- */
- return -EINVAL;
-
-}
-
-/* drm_dma.h hooks
-*/
-
-static void igdng_irq_preinstall(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- I915_WRITE(HWSTAM, 0xeffe);
-
- /* XXX hotplug from PCH */
-
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- (void) I915_READ(DEIER);
-
- /* and GT */
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- (void) I915_READ(GTIER);
-}
-
-static int igdng_irq_postinstall(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	/* enable the kinds of interrupts that are always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
- u32 render_mask = GT_USER_INTERRUPT;
-
- dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask;
-
-	/* should always be able to generate an irq */
- I915_WRITE(DEIIR, I915_READ(DEIIR));
- (void) I915_READ(DEIIR);
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- (void) I915_READ(DEIER);
-
-	/* user interrupt should be enabled, but masked initially */
- dev_priv->gt_irq_mask_reg = 0xffffffff;
- dev_priv->gt_irq_enable_reg = render_mask;
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- (void) I915_READ(GTIIR);
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
- (void) I915_READ(GTIER);
-
- return 0;
-}
-
-static void igdng_irq_uninstall(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- I915_WRITE(HWSTAM, 0xffffffff);
-
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- I915_WRITE(DEIIR, I915_READ(DEIIR));
-
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
-}
-
-int i915_driver_irq_preinstall(drm_device_t * dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- if (!dev_priv->mmio_map)
- return -EINVAL;
-
- if (IS_IGDNG(dev)) {
- igdng_irq_preinstall(dev);
- return 0;
- }
-
- I915_WRITE16(HWSTAM, 0xeffe);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE16(IER, 0x0);
- (void) I915_READ(IER);
-
- return 0;
-}
-
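-/*
- * Finish interrupt setup: unmask the fixed interrupts, program the
- * error mask (EMR), clear stale pipe/IIR status and initialize the
- * interrupt wait queue.
- */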
-void i915_driver_irq_postinstall(drm_device_t * dev)
-{
- int error_mask;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- if (IS_IGDNG(dev)) {
- (void) igdng_irq_postinstall(dev);
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue, DRM_INTR_PRI(dev));
- return;
- }
-
- /* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
-
- dev_priv->pipestat[0] = 0;
- dev_priv->pipestat[1] = 0;
-
-	/*
-	 * Enable some error detection; note that the instruction error
-	 * mask bit is reserved, so we leave it masked.
-	 */
- if (IS_G4X(dev)) {
- error_mask = ~(GM45_ERROR_PAGE_TABLE |
- GM45_ERROR_MEM_PRIV |
- GM45_ERROR_CP_PRIV |
- I915_ERROR_MEMORY_REFRESH);
- } else {
- error_mask = ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH);
- }
- I915_WRITE(EMR, error_mask);
-
- /* Disable pipe interrupt enables, clear pending pipe status */
- I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
- I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
- (void) I915_READ(PIPEASTAT);
- (void) I915_READ(PIPEBSTAT);
- /* Clear pending interrupt status */
- I915_WRITE(IIR, I915_READ(IIR));
-
- (void) I915_READ(IIR);
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
- (void) I915_READ(IER);
-
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue, DRM_INTR_PRI(dev));
-
- return;
-}
-
-void i915_driver_irq_uninstall(drm_device_t * dev)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if ((!dev_priv) || (dev->irq_enabled == 0))
- return;
-
- dev_priv->vblank_pipe = 0;
-
- if (IS_IGDNG(dev)) {
- igdng_irq_uninstall(dev);
- DRM_FINI_WAITQUEUE(&dev_priv->irq_queue);
- return;
- }
-
- I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
-
- I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
- I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
- I915_WRITE(IIR, I915_READ(IIR));
-
- DRM_FINI_WAITQUEUE(&dev_priv->irq_queue);
-}
diff --git a/usr/src/uts/intel/io/drm/i915_mem.c b/usr/src/uts/intel/io/drm/i915_mem.c
deleted file mode 100644
index 445bfa1..0000000
--- a/usr/src/uts/intel/io/drm/i915_mem.c
+++ /dev/null
@@ -1,425 +0,0 @@
-/* BEGIN CSTYLED */
-
-/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-/* This memory manager is integrated into the global/local lru
- * mechanisms used by the clients. Specifically, it operates by
- * setting the 'in_use' fields of the global LRU to indicate whether
- * this region is privately allocated to a client.
- *
- * This does require the client to actually respect that field.
- *
- * Currently no effort is made to allocate 'private' memory in any
- * clever way - the LRU information isn't used to determine which
- * block to allocate, and the ring is drained prior to allocations --
- * in other words allocation is expensive.
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
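-/*
- * Mark the texture-LRU regions overlapped by block 'p' as in_use (or
- * free), refresh their age, and move them to the head of the LRU list.
- */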
-void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_tex_region_t *list;
- unsigned shift, nr;
- unsigned start;
- unsigned end;
- unsigned i;
- int age;
-
- shift = dev_priv->tex_lru_log_granularity;
- nr = I915_NR_TEX_REGIONS;
-
- start = p->start >> shift;
- end = (p->start + p->size - 1) >> shift;
-
- age = ++sarea_priv->texAge;
- list = sarea_priv->texList;
-
- /* Mark the regions with the new flag and update their age. Move
- * them to head of list to preserve LRU semantics.
- */
- for (i = start; i <= end; i++) {
- list[i].in_use = (unsigned char)in_use;
- list[i].age = age;
-
- /* remove_from_list(i)
- */
- list[(unsigned)list[i].next].prev = list[i].prev;
- list[(unsigned)list[i].prev].next = list[i].next;
-
- /* insert_at_head(list, i)
- */
- list[i].prev = (unsigned char)nr;
- list[i].next = list[nr].next;
- list[(unsigned)list[nr].next].prev = (unsigned char)i;
- list[nr].next = (unsigned char)i;
- }
-}
-
-/* Very simple allocator for agp memory, working on a static range
- * already mapped into each client's address space.
- */
-
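-/*
- * Split off the head and/or tail of free block 'p' so that the piece
- * returned covers exactly [start, start + size) and belongs to fpriv.
- */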
-static struct mem_block *split_block(struct mem_block *p, int start, int size, drm_file_t *fpriv)
-{
- /* Maybe cut off the start of an existing block */
- if (start > p->start) {
- struct mem_block *newblock =
- drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
- if (!newblock)
- goto out;
- newblock->start = start;
- newblock->size = p->size - (start - p->start);
- newblock->filp = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size -= newblock->size;
- p = newblock;
- }
-
- /* Maybe cut off the end of an existing block */
- if (size < p->size) {
- struct mem_block *newblock =
- drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
- if (!newblock)
- goto out;
- newblock->start = start + size;
- newblock->size = p->size - size;
- newblock->filp = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size = size;
- }
-
- out:
- /* Our block is in the middle */
- p->filp = fpriv;
- return (p);
-}
-
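-/*
- * First-fit search of the heap for a free block; align2 is the log2
- * of the required alignment.
- */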
-static struct mem_block *alloc_block(struct mem_block *heap, int size,
- int align2, drm_file_t *fpriv)
-{
- struct mem_block *p;
- int mask = (1 << align2) - 1;
-
- for (p = heap->next; p != heap; p = p->next) {
- int start = (p->start + mask) & ~mask;
- if (p->filp == NULL && start + size <= p->start + p->size)
- return split_block(p, start, size, fpriv);
- }
-
- return NULL;
-}
-
-static struct mem_block *find_block(struct mem_block *heap, int start)
-{
- struct mem_block *p;
-
- for (p = heap->next; p != heap; p = p->next)
- if (p->start == start)
- return (p);
-
- return (NULL);
-}
-
-struct mem_block *find_block_by_proc(struct mem_block *heap, drm_file_t *fpriv)
-{
- struct mem_block *p;
-
- for (p = heap->next; p != heap; p = p->next)
- if (p->filp == fpriv)
- return (p);
-
- return (NULL);
-}
-
-void free_block(struct mem_block *p)
-{
- p->filp = NULL;
-
- /* Assumes a single contiguous range. Needs a special filp in
- * 'heap' to stop it being subsumed.
- */
- if (p->next->filp == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
- }
-
- if (p->prev->filp == NULL) {
- struct mem_block *q = p->prev;
- q->size += p->size;
- q->next = p->next;
- q->next->prev = q;
- drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
- }
-}
-
-/* Initialize. How to check for an uninitialized heap?
- */
-static int init_heap(struct mem_block **heap, int start, int size)
-{
- struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
-
- if (!blocks)
- return (ENOMEM);
-
- *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
- if (!*heap) {
- drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
- return (ENOMEM);
- }
-
- blocks->start = start;
- blocks->size = size;
- blocks->filp = NULL;
- blocks->next = blocks->prev = *heap;
-
- (void) memset(*heap, 0, sizeof(**heap));
- (*heap)->filp = (drm_file_t *) - 1;
- (*heap)->next = (*heap)->prev = blocks;
- return (0);
-}
-
-/* Free all blocks associated with the releasing file.
- */
-void i915_mem_release(drm_device_t * dev, drm_file_t *fpriv, struct mem_block *heap)
-{
- struct mem_block *p;
-
- if (!heap || !heap->next)
- return;
-
- for (p = heap->next; p != heap; p = p->next) {
- if (p->filp == fpriv) {
- p->filp = NULL;
- mark_block(dev, p, 0);
- }
- }
-
- /* Assumes a single contiguous range. Needs a special filp in
- * 'heap' to stop it being subsumed.
- */
- for (p = heap->next; p != heap; p = p->next) {
- while (p->filp == NULL && p->next->filp == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
- }
- }
-}
-
-/* Shutdown.
- */
-void i915_mem_takedown(struct mem_block **heap)
-{
- struct mem_block *p;
-
- if (!*heap)
- return;
-
- for (p = (*heap)->next; p != *heap;) {
- struct mem_block *q = p;
- p = p->next;
- drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
- }
-
- drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
- *heap = NULL;
-}
-
-struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
-{
- switch (region) {
- case I915_MEM_REGION_AGP:
- return (&dev_priv->agp_heap);
- default:
- return (NULL);
- }
-}
-
-/* IOCTL HANDLERS */
-
-/*ARGSUSED*/
-int i915_mem_alloc(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_alloc_t alloc;
- struct mem_block *block, **heap;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
-
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_i915_mem_alloc32_t alloc32;
-
- DRM_COPYFROM_WITH_RETURN(&alloc32, (void *)data, sizeof (alloc32));
- alloc.region = alloc32.region;
- alloc.alignment = alloc32.alignment;
- alloc.size = alloc32.size;
- alloc.region_offset = (int *)(uintptr_t)alloc32.region_offset;
- } else
- DRM_COPYFROM_WITH_RETURN(&alloc, (void *) data, sizeof(alloc));
-
- heap = get_heap(dev_priv, alloc.region);
- if (!heap || !*heap)
- return (EFAULT);
-
- /* Make things easier on ourselves: all allocations at least
- * 4k aligned.
- */
- if (alloc.alignment < 12)
- alloc.alignment = 12;
-
- block = alloc_block(*heap, alloc.size, alloc.alignment, fpriv);
-
- if (!block)
- return (ENOMEM);
-
- mark_block(dev, block, 1);
-
- if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return (EFAULT);
- }
-
- return (0);
-}
-
-/*ARGSUSED*/
-int i915_mem_free(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_free_t memfree;
- struct mem_block *block, **heap;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
-
- DRM_COPYFROM_WITH_RETURN(&memfree, (void *)data, sizeof(memfree));
-
- heap = get_heap(dev_priv, memfree.region);
- if (!heap || !*heap)
- return (EFAULT);
-
- block = find_block(*heap, memfree.region_offset);
- if (!block)
- return (EFAULT);
-
- if (block->filp != fpriv)
- return (EPERM);
-
- mark_block(dev, block, 0);
- free_block(block);
- return (0);
-}
-
-/*ARGSUSED*/
-int i915_mem_init_heap(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_init_heap_t initheap;
- struct mem_block **heap;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
-
- DRM_COPYFROM_WITH_RETURN(&initheap, (void *)data, sizeof(initheap));
-
- heap = get_heap(dev_priv, initheap.region);
- if (!heap)
- return (EFAULT);
-
- if (*heap) {
- DRM_ERROR("heap already initialized?");
- return (EFAULT);
- }
-
- return init_heap(heap, initheap.start, initheap.size);
-}
-
-/*ARGSUSED*/
-int i915_mem_destroy_heap(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_destroy_heap_t destroyheap;
- struct mem_block **heap;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return (EINVAL);
- }
-
- DRM_COPYFROM_WITH_RETURN(&destroyheap, (void *)data, sizeof(destroyheap));
-
- heap = get_heap(dev_priv, destroyheap.region);
- if (!heap) {
- DRM_ERROR("get_heap failed");
- return (EFAULT);
- }
-
- if (!*heap) {
- DRM_ERROR("heap not initialized?");
- return (EFAULT);
- }
-
- i915_mem_takedown(heap);
- return (0);
-}
diff --git a/usr/src/uts/intel/io/i915/LICENSE_I915 b/usr/src/uts/intel/io/i915/LICENSE_I915
new file mode 100644
index 0000000..8a49082
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/LICENSE_I915
@@ -0,0 +1,1432 @@
+
+File: dvo_ch7017.c
+
+/*
+ * Copyright (c) 2006, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: dvo_ch7xxx.c
+
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+
+-------------------------------------------------------------------------
+
+File: dvo_ivch.c
+
+/*
+ * Copyright (c) 2006, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: dvo_ns2501.c
+
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: dvo_sil164.c
+
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+-------------------------------------------------------------------------
+
+File: dvo_tfp410.c
+
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: dvo.h
+
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_dma.c
+
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_drm.h
+
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_drv.c
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_drv.h
+
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_gem_context.c
+
+/*
+ * Copyright © 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_gem_debug.c
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_gem_evict.c
+
+/*
+ * Copyright (c) 2008-2010, 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_gem_execbuffer.c
+
+/*
+ * Copyright © 2008,2010, 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_gem_gtt.c
+
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_gem_stolen.c
+
+/*
+ * Copyright © 2008-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_gem_tiling.c
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_gem.c
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_irq.c
+
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_reg.h
+
+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_suspend.c
+
+/*
+ *
+ * Copyright 2008, 2013, (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: i915_ums.c
+
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ * Copyright 2013 (c) Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_bios.c
+
+/*
+ * Copyright (c) 2006, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_bios.h
+
+/*
+ * Copyright (c) 2006, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_crt.c
+
+/*
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_ddi.c
+
+/*
+ * Copyright © 2012-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_display.c
+
+/*
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_dp.c
+
+/*
+ * Copyright (c) 2008, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_drv.h
+
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_dvo.c
+
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_fb.c
+
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_hdmi.c
+
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2006-2009, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_i2c.c
+
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_lvds.c
+
+/*
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_modes.c
+
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_overlay.c
+
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_panel.c
+
+/*
+ * Copyright (c) 2006-2010, 2013, Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_pm.c
+
+/*
+ * Copyright © 2012-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_ringbuffer.c
+
+/*
+ * Copyright (c) 2008-2010, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_ringbuffer.h
+
+/*
+ * Copyright (c) 2008-2010, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_sdvo_regs.h
+
+/*
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_sdvo.c
+
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_sideband.c
+
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_sprite.c
+
+/*
+ * Copyright (c) 2011, 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: intel_tv.c
+
+/*
+ * Copyright (c) 2006-2013 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
diff --git a/usr/src/uts/intel/io/i915/LICENSE_I915.descrip b/usr/src/uts/intel/io/i915/LICENSE_I915.descrip
new file mode 100644
index 0000000..075d8a7
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/LICENSE_I915.descrip
@@ -0,0 +1 @@
+I915 DRIVER
diff --git a/usr/src/uts/intel/io/i915/dvo.h b/usr/src/uts/intel/io/i915/dvo.h
new file mode 100644
index 0000000..fa2a7f1
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/dvo.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DVO_H
+#define _INTEL_DVO_H
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_sun_i2c.h"
+#include "intel_drv.h"
+
+struct intel_dvo_device {
+ const char *name;
+ int type;
+ /* DVOA/B/C output register */
+ u32 dvo_reg;
+ /* GPIO register used for i2c bus to control this device */
+ u32 gpio;
+ int slave_addr;
+
+ const struct intel_dvo_dev_ops *dev_ops;
+ void *dev_priv;
+ struct i2c_adapter *i2c_bus;
+};
+
+struct intel_dvo_dev_ops {
+ /*
+ * Initialize the device at startup time.
+ * Returns false if the device does not exist.
+ */
+ bool (*init)(struct intel_dvo_device *dvo,
+ struct i2c_adapter *i2cbus);
+
+ /*
+ * Called to allow the output a chance to create properties after the
+ * RandR objects have been created.
+ */
+ void (*create_resources)(struct intel_dvo_device *dvo);
+
+ /*
+ * Turn on/off output or set intermediate power levels if available.
+ *
+ * Unsupported intermediate modes drop to the lower power setting.
+ * If the mode is DPMSModeOff, the output must be disabled.
+ */
+ void (*dpms)(struct intel_dvo_device *dvo, bool enable);
+
+ /*
+ * Callback for testing a video mode for a given output.
+ *
+ * This function should only check for cases where a mode can't
+ * be supported on the output specifically, and not represent
+ * generic CRTC limitations.
+ *
+ * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+ */
+ int (*mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
+
+ /*
+ * Callback to adjust the mode to be set in the CRTC.
+ *
+ * This allows an output to adjust the clock or even the entire set of
+ * timings, which is used for panels with fixed timings or for
+ * buses with clock limitations.
+ */
+ bool (*mode_fixup)(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Callback for preparing mode changes on an output
+ */
+ void (*prepare)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for committing mode changes on an output
+ */
+ void (*commit)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for setting up a video mode after fixups have been made.
+ *
+ * This is only called while the output is disabled. The dpms callback
+ * must be all that's necessary to turn the output on
+ * after this function is called.
+ */
+ void (*mode_set)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Probe for a connected output, and return detect_status.
+ */
+ enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+
+ /*
+ * Probe the current hw status, returning true if the connected output
+ * is active.
+ */
+ bool (*get_hw_state)(struct intel_dvo_device *dev);
+
+ /**
+ * Query the device for the modes it provides.
+ *
+ * This function may also update MonInfo, mm_width, and mm_height.
+ *
+ * \return singly-linked list of modes or NULL if no modes found.
+ */
+ struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
+
+ /**
+ * Clean up driver-specific bits of the output
+ */
+ void (*destroy) (struct intel_dvo_device *dvo);
+
+ /**
+ * Debugging hook to dump device registers to log file
+ */
+ void (*dump_regs)(struct intel_dvo_device *dvo);
+};
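+
+/*
+ * Usage sketch (illustrative only, not part of the imported sources):
+ * a caller binds one of the op tables declared below to an
+ * intel_dvo_device, probes it over i2c, and then drives the output
+ * entirely through the callbacks.  The device name and slave address
+ * shown here are hypothetical values:
+ *
+ *	struct intel_dvo_device dvo = {
+ *		.name = "ch7017",
+ *		.slave_addr = 0x75,
+ *		.dev_ops = &ch7017_ops,
+ *	};
+ *
+ *	if (dvo.dev_ops->init(&dvo, adapter) &&
+ *	    dvo.dev_ops->mode_valid(&dvo, mode) == MODE_OK) {
+ *		dvo.dev_ops->mode_set(&dvo, mode, adjusted_mode);
+ *		dvo.dev_ops->dpms(&dvo, true);
+ *	}
+ */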
+
+extern struct intel_dvo_dev_ops sil164_ops;
+extern struct intel_dvo_dev_ops ch7xxx_ops;
+extern struct intel_dvo_dev_ops ivch_ops;
+extern struct intel_dvo_dev_ops tfp410_ops;
+extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
+
+#endif /* _INTEL_DVO_H */
diff --git a/usr/src/uts/intel/io/i915/dvo_ch7017.c b/usr/src/uts/intel/io/i915/dvo_ch7017.c
new file mode 100644
index 0000000..3fda84f
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/dvo_ch7017.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+#define CH7017_TV_DISPLAY_MODE 0x00
+#define CH7017_FLICKER_FILTER 0x01
+#define CH7017_VIDEO_BANDWIDTH 0x02
+#define CH7017_TEXT_ENHANCEMENT 0x03
+#define CH7017_START_ACTIVE_VIDEO 0x04
+#define CH7017_HORIZONTAL_POSITION 0x05
+#define CH7017_VERTICAL_POSITION 0x06
+#define CH7017_BLACK_LEVEL 0x07
+#define CH7017_CONTRAST_ENHANCEMENT 0x08
+#define CH7017_TV_PLL 0x09
+#define CH7017_TV_PLL_M 0x0a
+#define CH7017_TV_PLL_N 0x0b
+#define CH7017_SUB_CARRIER_0 0x0c
+#define CH7017_CIV_CONTROL 0x10
+#define CH7017_CIV_0 0x11
+#define CH7017_CHROMA_BOOST 0x14
+#define CH7017_CLOCK_MODE 0x1c
+#define CH7017_INPUT_CLOCK 0x1d
+#define CH7017_GPIO_CONTROL 0x1e
+#define CH7017_INPUT_DATA_FORMAT 0x1f
+#define CH7017_CONNECTION_DETECT 0x20
+#define CH7017_DAC_CONTROL 0x21
+#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22
+#define CH7017_DEFEAT_VSYNC 0x47
+#define CH7017_TEST_PATTERN 0x48
+
+#define CH7017_POWER_MANAGEMENT 0x49
+/** Enables the TV output path. */
+#define CH7017_TV_EN (1 << 0)
+#define CH7017_DAC0_POWER_DOWN (1 << 1)
+#define CH7017_DAC1_POWER_DOWN (1 << 2)
+#define CH7017_DAC2_POWER_DOWN (1 << 3)
+#define CH7017_DAC3_POWER_DOWN (1 << 4)
+/** Powers down the TV out block, and DAC0-3 */
+#define CH7017_TV_POWER_DOWN_EN (1 << 5)
+
+#define CH7017_VERSION_ID 0x4a
+
+#define CH7017_DEVICE_ID 0x4b
+#define CH7017_DEVICE_ID_VALUE 0x1b
+#define CH7018_DEVICE_ID_VALUE 0x1a
+#define CH7019_DEVICE_ID_VALUE 0x19
+
+#define CH7017_XCLK_D2_ADJUST 0x53
+#define CH7017_UP_SCALER_COEFF_0 0x55
+#define CH7017_UP_SCALER_COEFF_1 0x56
+#define CH7017_UP_SCALER_COEFF_2 0x57
+#define CH7017_UP_SCALER_COEFF_3 0x58
+#define CH7017_UP_SCALER_COEFF_4 0x59
+#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a
+#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b
+#define CH7017_GPIO_INVERT 0x5c
+#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d
+#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f
+/**< Low bits of horizontal active pixel input */
+
+#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60
+/** High bits of horizontal active pixel input */
+#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0)
+/** High bits of vertical active line output */
+#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3)
+
+#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61
+/**< Low bits of vertical active line output */
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62
+/**< Low bits of horizontal active pixel output */
+
+#define CH7017_LVDS_POWER_DOWN 0x63
+/** High bits of horizontal active pixel output */
+#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0)
+/** Enables the LVDS power down state transition */
+#define CH7017_LVDS_POWER_DOWN_EN (1 << 6)
+/** Enables the LVDS upscaler */
+#define CH7017_LVDS_UPSCALER_EN (1 << 7)
+#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
+
+#define CH7017_LVDS_ENCODING 0x64
+#define CH7017_LVDS_DITHER_2D (1 << 2)
+#define CH7017_LVDS_DITHER_DIS (1 << 3)
+#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4)
+#define CH7017_LVDS_24_BIT (1 << 5)
+
+#define CH7017_LVDS_ENCODING_2 0x65
+
+#define CH7017_LVDS_PLL_CONTROL 0x66
+/** Enables the LVDS panel output path */
+#define CH7017_LVDS_PANEN (1 << 0)
+/** Enables the LVDS panel backlight */
+#define CH7017_LVDS_BKLEN (1 << 3)
+
+#define CH7017_POWER_SEQUENCING_T1 0x67
+#define CH7017_POWER_SEQUENCING_T2 0x68
+#define CH7017_POWER_SEQUENCING_T3 0x69
+#define CH7017_POWER_SEQUENCING_T4 0x6a
+#define CH7017_POWER_SEQUENCING_T5 0x6b
+#define CH7017_GPIO_DRIVER_TYPE 0x6c
+#define CH7017_GPIO_DATA 0x6d
+#define CH7017_GPIO_DIRECTION_CONTROL 0x6e
+
+#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71
+# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
+# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
+# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
+
+#define CH7017_LVDS_PLL_VCO_CONTROL 0x72
+# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
+# define CH7017_LVDS_PLL_VCO_SHIFT 4
+# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
+
+#define CH7017_OUTPUTS_ENABLE 0x73
+# define CH7017_CHARGE_PUMP_LOW 0x0
+# define CH7017_CHARGE_PUMP_HIGH 0x3
+# define CH7017_LVDS_CHANNEL_A (1 << 3)
+# define CH7017_LVDS_CHANNEL_B (1 << 4)
+# define CH7017_TV_DAC_A (1 << 5)
+# define CH7017_TV_DAC_B (1 << 6)
+# define CH7017_DDC_SELECT_DC2 (1 << 7)
+
+#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74
+#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75
+#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
+
+#define CH7017_LVDS_CONTROL_2 0x78
+# define CH7017_LOOP_FILTER_SHIFT 5
+# define CH7017_PHASE_DETECTOR_SHIFT 0
+
+#define CH7017_BANG_LIMIT_CONTROL 0x7f
+
+struct ch7017_priv {
+ uint8_t dummy;
+};
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
+
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = val,
+ }
+ };
+ return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
+}
+
+static bool ch7017_write(struct intel_dvo_device *dvo, uint8_t addr, uint8_t val)
+{
+ uint8_t buf[2] = { addr, val };
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ };
+ return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
+}
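+
+/*
+ * The two helpers above capture the CH7017 register protocol: a read
+ * is a one-byte register-address write followed by a one-byte data
+ * read, while a write sends address and data as a single two-byte
+ * message.  A read-modify-write of a register bit therefore looks
+ * like the following sketch (illustrative only, mirroring what
+ * ch7017_dpms() does below; it is not part of the imported code):
+ *
+ *	uint8_t v;
+ *
+ *	if (ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &v))
+ *		(void) ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ *		    v | CH7017_LVDS_POWER_DOWN_EN);
+ */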
+
+/** Probes for a CH7017 on the given bus and slave address. */
+static bool ch7017_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ struct ch7017_priv *priv;
+ const char *str;
+ u8 val;
+
+ priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = priv;
+
+ if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+ goto fail;
+
+ switch (val) {
+ case CH7017_DEVICE_ID_VALUE:
+ str = "ch7017";
+ break;
+ case CH7018_DEVICE_ID_VALUE:
+ str = "ch7018";
+ break;
+ case CH7019_DEVICE_ID_VALUE:
+ str = "ch7019";
+ break;
+ default:
+ DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+ "slave %d.\n",
+			val, adapter->name, dvo->slave_addr);
+ goto fail;
+ }
+
+ DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+ str, adapter->name, dvo->slave_addr);
+ return true;
+
+fail:
+ kfree(priv, sizeof (struct ch7017_priv));
+ return false;
+}
+
+/* LINTED */
+static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+/* LINTED */
+static int ch7017_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 160000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void ch7017_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ /* LINTED */
+ struct drm_display_mode *adjusted_mode)
+{
+ uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
+ uint8_t outputs_enable, lvds_control_2, lvds_power_down;
+ uint8_t horizontal_active_pixel_input;
+ uint8_t horizontal_active_pixel_output, vertical_active_line_output;
+ uint8_t active_input_line_output;
+
+ DRM_DEBUG_KMS("Registers before mode setting\n");
+ ch7017_dump_regs(dvo);
+
+	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf */
+ if (mode->clock < 100000) {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ } else {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_feedback_div = 35;
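+		/*
+		 * Note that this constant replaces the feedback divider
+		 * computed just above; the overriding assignment appears
+		 * in the upstream driver as well and is kept as-is here.
+		 */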
+ lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ /* LINTED */
+ if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
+ outputs_enable |= CH7017_LVDS_CHANNEL_B;
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ } else {
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (1 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ }
+ }
+
+ horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
+
+ vertical_active_line_output = mode->vdisplay & 0x00ff;
+ horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
+
+ active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
+ (((mode->vdisplay & 0x0700) >> 8) << 3);
+
+ lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
+ (mode->hdisplay & 0x0700) >> 8;
+
+ ch7017_dpms(dvo, false);
+ (void) ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
+ horizontal_active_pixel_input);
+ (void) ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
+ horizontal_active_pixel_output);
+ (void) ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
+ vertical_active_line_output);
+ (void) ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
+ active_input_line_output);
+ (void) ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
+ (void) ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
+ (void) ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
+ (void) ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
+
+ /* Turn the LVDS back on with new settings. */
+ (void) ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+
+ DRM_DEBUG_KMS("Registers after mode setting\n");
+ ch7017_dump_regs(dvo);
+}
+
+/* set the CH7017 power state */
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ uint8_t val;
+
+ (void) ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ /* Turn off TV/VGA, and never turn it on since we don't support it. */
+ (void) ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
+ CH7017_DAC0_POWER_DOWN |
+ CH7017_DAC1_POWER_DOWN |
+ CH7017_DAC2_POWER_DOWN |
+ CH7017_DAC3_POWER_DOWN |
+ CH7017_TV_POWER_DOWN_EN);
+
+ if (enable) {
+ /* Turn on the LVDS */
+ (void) ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val & ~CH7017_LVDS_POWER_DOWN_EN);
+ } else {
+ /* Turn off the LVDS */
+ (void) ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val | CH7017_LVDS_POWER_DOWN_EN);
+ }
+
+ /* XXX: Should actually wait for update power status somehow */
+ msleep(20);
+}
+
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+	(void) ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ if (val & CH7017_LVDS_POWER_DOWN_EN)
+ return false;
+ else
+ return true;
+}
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+#define DUMP(reg) \
+do { \
+ (void) ch7017_read(dvo, reg, &val); \
+ DRM_DEBUG_KMS(#reg ": %02x\n", val); \
+} while (*"\0")
+
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
+ DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
+ DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
+ DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
+ DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
+ DUMP(CH7017_LVDS_CONTROL_2);
+ DUMP(CH7017_OUTPUTS_ENABLE);
+ DUMP(CH7017_LVDS_POWER_DOWN);
+}
+
+static void ch7017_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7017_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ kfree(priv, sizeof(struct ch7017_priv));
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ch7017_ops = {
+ .init = ch7017_init,
+ .detect = ch7017_detect,
+ .mode_valid = ch7017_mode_valid,
+ .mode_set = ch7017_mode_set,
+ .dpms = ch7017_dpms,
+ .get_hw_state = ch7017_get_hw_state,
+ .dump_regs = ch7017_dump_regs,
+ .destroy = ch7017_destroy,
+};
diff --git a/usr/src/uts/intel/io/i915/dvo_ch7xxx.c b/usr/src/uts/intel/io/i915/dvo_ch7xxx.c
new file mode 100644
index 0000000..cfff658
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/dvo_ch7xxx.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+/*
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ */
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define CH7xxx_REG_VID 0x4a
+#define CH7xxx_REG_DID 0x4b
+
+#define CH7011_VID 0x83 /* 7010 as well */
+#define CH7010B_VID 0x05
+#define CH7009A_VID 0x84
+#define CH7009B_VID 0x85
+#define CH7301_VID 0x95
+
+#define CH7xxx_VID 0x84
+#define CH7xxx_DID 0x17
+#define CH7010_DID 0x16
+
+#define CH7xxx_NUM_REGS 0x4c
+
+#define CH7xxx_CM 0x1c
+#define CH7xxx_CM_XCM (1<<0)
+#define CH7xxx_CM_MCP (1<<2)
+#define CH7xxx_INPUT_CLOCK 0x1d
+#define CH7xxx_GPIO 0x1e
+#define CH7xxx_GPIO_HPIR (1<<3)
+#define CH7xxx_IDF 0x1f
+
+#define CH7xxx_IDF_HSP (1<<3)
+#define CH7xxx_IDF_VSP (1<<4)
+
+#define CH7xxx_CONNECTION_DETECT 0x20
+#define CH7xxx_CDET_DVI (1<<5)
+
+#define CH7301_DAC_CNTL 0x21
+#define CH7301_HOTPLUG 0x23
+#define CH7xxx_TCTL 0x31
+#define CH7xxx_TVCO 0x32
+#define CH7xxx_TPCP 0x33
+#define CH7xxx_TPD 0x34
+#define CH7xxx_TPVT 0x35
+#define CH7xxx_TLPF 0x36
+#define CH7xxx_TCT 0x37
+#define CH7301_TEST_PATTERN 0x48
+
+#define CH7xxx_PM 0x49
+#define CH7xxx_PM_FPD (1<<0)
+#define CH7301_PM_DACPD0 (1<<1)
+#define CH7301_PM_DACPD1 (1<<2)
+#define CH7301_PM_DACPD2 (1<<3)
+#define CH7xxx_PM_DVIL (1<<6)
+#define CH7xxx_PM_DVIP (1<<7)
+
+#define CH7301_SYNC_POLARITY 0x56
+#define CH7301_SYNC_RGB_YUV (1<<0)
+#define CH7301_SYNC_POL_DVI (1<<5)
+
+/** @file
+ * Driver for the Chrontel 7xxx DVI chip over DVO.
+ */
+
+static struct ch7xxx_id_struct {
+ uint8_t vid;
+ char *name;
+} ch7xxx_ids[] = {
+ { CH7011_VID, "CH7011" },
+ { CH7010B_VID, "CH7010B" },
+ { CH7009A_VID, "CH7009A" },
+ { CH7009B_VID, "CH7009B" },
+ { CH7301_VID, "CH7301" },
+};
+
+static struct ch7xxx_did_struct {
+ uint8_t did;
+ char *name;
+} ch7xxx_dids[] = {
+ { CH7xxx_DID, "CH7XXX" },
+ { CH7010_DID, "CH7010B" },
+};
+
+struct ch7xxx_priv {
+ bool quiet;
+};
+
+static char *ch7xxx_get_id(uint8_t vid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
+ if (ch7xxx_ids[i].vid == vid)
+ return ch7xxx_ids[i].name;
+ }
+
+ return NULL;
+}
+
+static char *ch7xxx_get_did(uint8_t did)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
+ if (ch7xxx_dids[i].did == did)
+ return ch7xxx_dids[i].name;
+ }
+
+ return NULL;
+}
+
+/** Reads an 8-bit register. */
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = (u8) addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+}
+
+/** Writes an 8-bit register. */
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = (uint8_t) addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+static bool ch7xxx_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the CH7xxx chip on the specified i2c bus */
+ struct ch7xxx_priv *ch7xxx;
+ uint8_t vendor, device;
+ char *name, *devid;
+
+ ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
+ if (ch7xxx == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ch7xxx;
+ ch7xxx->quiet = true;
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
+ goto out;
+
+ name = ch7xxx_get_id(vendor);
+ if (!name) {
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
+ vendor, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
+ goto out;
+
+ devid = ch7xxx_get_did(device);
+ if (!devid) {
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
+ vendor, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ ch7xxx->quiet = false;
+ DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ name, vendor, device);
+ return true;
+out:
+ kfree(ch7xxx, sizeof (struct ch7xxx_priv));
+ return false;
+}
+
+static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
+{
+ uint8_t cdet, orig_pm, pm;
+
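+ /* Briefly clear flat-panel power-down and set the DVI power bits so
+ * the connection-detect register returns a valid reading, sample it,
+ * then restore the original power-management state. */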
+ (void) ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
+
+ pm = orig_pm;
+ pm &= ~CH7xxx_PM_FPD;
+ pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
+
+ (void) ch7xxx_writeb(dvo, CH7xxx_PM, pm);
+
+ (void) ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
+
+ (void) ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
+
+ if (cdet & CH7xxx_CDET_DVI)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+/* LINTED */
+static int ch7xxx_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ /* LINTED */
+ struct drm_display_mode *adjusted_mode)
+{
+ uint8_t tvco, tpcp, tpd, tlpf, idf;
+
+ if (mode->clock <= 65000) {
+ tvco = 0x23;
+ tpcp = 0x08;
+ tpd = 0x16;
+ tlpf = 0x60;
+ } else {
+ tvco = 0x2d;
+ tpcp = 0x06;
+ tpd = 0x26;
+ tlpf = 0xa0;
+ }
+
+ (void) ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
+ (void) ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
+ (void) ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
+ (void) ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
+ (void) ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
+ (void) ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
+ (void) ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
+
+ (void) ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+
+ idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ idf |= CH7xxx_IDF_HSP;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ idf |= CH7xxx_IDF_VSP;
+
+ (void) ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+}
+
+/* set the CH7xxx power state */
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ if (enable)
+ (void) ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
+ else
+ (void) ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
+}
+
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+ if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
+ return true;
+ else
+ return false;
+}
+
+static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+{
+ int i;
+
+ for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
+ if ((i % 8) == 0)
+ DRM_LOG_KMS("\n %02X: ", i);
+ (void) ch7xxx_readb(dvo, i, &val);
+ DRM_LOG_KMS("%02X ", val);
+ }
+}
+
+static void ch7xxx_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+ if (ch7xxx) {
+ kfree(ch7xxx, sizeof (struct ch7xxx_priv));
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ch7xxx_ops = {
+ .init = ch7xxx_init,
+ .detect = ch7xxx_detect,
+ .mode_valid = ch7xxx_mode_valid,
+ .mode_set = ch7xxx_mode_set,
+ .dpms = ch7xxx_dpms,
+ .get_hw_state = ch7xxx_get_hw_state,
+ .dump_regs = ch7xxx_dump_regs,
+ .destroy = ch7xxx_destroy,
+};
diff --git a/usr/src/uts/intel/io/i915/dvo_ivch.c b/usr/src/uts/intel/io/i915/dvo_ivch.c
new file mode 100644
index 0000000..cded3aa
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/dvo_ivch.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+/*
+ * register definitions for the i82807aa.
+ *
+ * Documentation on this chipset can be found in datasheet #29069001 at
+ * intel.com.
+ */
+
+/*
+ * VCH Revision & GMBus Base Addr
+ */
+#define VR00 0x00
+# define VR00_BASE_ADDRESS_MASK 0x007f
+
+/*
+ * Functionality Enable
+ */
+#define VR01 0x01
+
+/*
+ * Enable the panel fitter
+ */
+# define VR01_PANEL_FIT_ENABLE (1 << 3)
+/*
+ * Enables the LCD display.
+ *
+ * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
+ */
+# define VR01_LCD_ENABLE (1 << 2)
+/** Enables the DVO repeater. */
+# define VR01_DVO_BYPASS_ENABLE (1 << 1)
+/** Enables the DVO clock */
+# define VR01_DVO_ENABLE (1 << 0)
+
+/*
+ * LCD Interface Format
+ */
+#define VR10 0x10
+/** Enables LVDS output instead of CMOS */
+# define VR10_LVDS_ENABLE (1 << 4)
+/** Enables 18-bit LVDS output. */
+# define VR10_INTERFACE_1X18 (0 << 2)
+/** Enables 24-bit LVDS or CMOS output */
+# define VR10_INTERFACE_1X24 (1 << 2)
+/** Enables 2x18-bit LVDS or CMOS output. */
+# define VR10_INTERFACE_2X18 (2 << 2)
+/** Enables 2x24-bit LVDS output */
+# define VR10_INTERFACE_2X24 (3 << 2)
+
+/*
+ * VR20 LCD Horizontal Display Size
+ */
+#define VR20 0x20
+
+/*
+ * LCD Vertical Display Size
+ */
+#define VR21 0x21
+
+/*
+ * Panel power down status
+ */
+#define VR30 0x30
+/** Read-only bit indicating that the panel is not in a safe poweroff state. */
+# define VR30_PANEL_ON (1 << 15)
+
+#define VR40 0x40
+# define VR40_STALL_ENABLE (1 << 13)
+# define VR40_VERTICAL_INTERP_ENABLE (1 << 12)
+# define VR40_ENHANCED_PANEL_FITTING (1 << 11)
+# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
+# define VR40_AUTO_RATIO_ENABLE (1 << 9)
+# define VR40_CLOCK_GATING_ENABLE (1 << 8)
+
+/*
+ * Panel Fitting Vertical Ratio
+ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
+ */
+#define VR41 0x41
+
+/*
+ * Panel Fitting Horizontal Ratio
+ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
+ */
+#define VR42 0x42
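+/*
+ * Worked example: fitting a 640x480 image onto a 1024x768 panel gives a
+ * vertical ratio of (((480 - 1) << 16) / (768 - 1)) >> 2 = 40927 >> 2 =
+ * 10231 (0x27f7); ivch_mode_set() below computes both ratios this way.
+ */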
+
+/*
+ * Horizontal Image Size
+ */
+#define VR43 0x43
+
+/* VR80 GPIO 0
+ */
+#define VR80 0x80
+#define VR81 0x81
+#define VR82 0x82
+#define VR83 0x83
+#define VR84 0x84
+#define VR85 0x85
+#define VR86 0x86
+#define VR87 0x87
+
+/* VR88 GPIO 8
+ */
+#define VR88 0x88
+
+/* Graphics BIOS scratch 0
+ */
+#define VR8E 0x8E
+# define VR8E_PANEL_TYPE_MASK (0xf << 0)
+# define VR8E_PANEL_INTERFACE_CMOS (0 << 4)
+# define VR8E_PANEL_INTERFACE_LVDS (1 << 4)
+# define VR8E_FORCE_DEFAULT_PANEL (1 << 5)
+
+/* Graphics BIOS scratch 1
+ */
+#define VR8F 0x8F
+# define VR8F_VCH_PRESENT (1 << 0)
+# define VR8F_DISPLAY_CONN (1 << 1)
+# define VR8F_POWER_MASK (0x3c)
+# define VR8F_POWER_POS (2)
+
+
+struct ivch_priv {
+ bool quiet;
+
+ uint16_t width, height;
+};
+
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo);
+
+/**
+ * Reads a register on the ivch.
+ *
+ * Each of the 256 registers is 16 bits wide.
+ */
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[1];
+ u8 in_buf[2];
+
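+ /*
+ * Unusual three-segment transfer: a zero-length read addressed to the
+ * slave, the register index written without a repeated START, then a
+ * two-byte read (also without START) that returns the 16-bit value
+ * least-significant byte first.
+ */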
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 0,
+ },
+ {
+ .addr = 0,
+ .flags = I2C_M_NOSTART,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD | I2C_M_NOSTART,
+ .len = 2,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = (u8) addr;
+
+ if (i2c_transfer(adapter, msgs, 3) == 3) {
+ *data = (in_buf[1] << 8) | in_buf[0];
+ return true;
+ }
+
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ "%s:%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+}
+
+/** Writes a 16-bit register on the ivch */
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[3];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 3,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = (u8) addr;
+ out_buf[1] = data & 0xff;
+ out_buf[2] = data >> 8;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/** Probes the given bus and slave address for an ivch */
+static bool ivch_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ struct ivch_priv *priv;
+ uint16_t temp;
+
+ priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = priv;
+ priv->quiet = true;
+
+ if (!ivch_read(dvo, VR00, &temp))
+ goto out;
+ priv->quiet = false;
+
+ /* The identification bits are probably zeroes, which isn't much of a
+ * signature, so check that the value in the base-address field matches
+ * the address the chip is responding on.
+ */
+ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+ DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
+ "(%d vs %d)\n",
+ (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+ goto out;
+ }
+
+ (void) ivch_read(dvo, VR20, &priv->width);
+ (void) ivch_read(dvo, VR21, &priv->height);
+
+ return true;
+
+out:
+ kfree(priv, sizeof (struct ivch_priv));
+ return false;
+}
+
+/* LINTED */
+static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+/* LINTED */
+static int ivch_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 112000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+/** Sets the power state of the panel connected to the ivch */
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ int i;
+ uint16_t vr01, vr30, backlight;
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return;
+
+ if (enable)
+ backlight = 1;
+ else
+ backlight = 0;
+ (void) ivch_write(dvo, VR80, backlight);
+
+ if (enable)
+ vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
+ else
+ vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
+
+ (void) ivch_write(dvo, VR01, vr01);
+
+ /* Wait for the panel to make its state transition */
+ for (i = 0; i < 100; i++) {
+ if (!ivch_read(dvo, VR30, &vr30))
+ break;
+
+ if (((vr30 & VR30_PANEL_ON) != 0) == enable)
+ break;
+ udelay(1000);
+ }
+ /* wait some more; vch may fail to resync sometimes without this */
+ udelay(16 * 1000);
+}
+
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint16_t vr01;
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return false;
+
+ if (vr01 & VR01_LCD_ENABLE)
+ return true;
+ else
+ return false;
+}
+
+static void ivch_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint16_t vr40 = 0;
+ uint16_t vr01;
+
+ vr01 = 0;
+ vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
+ VR40_HORIZONTAL_INTERP_ENABLE);
+
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay) {
+ uint16_t x_ratio, y_ratio;
+
+ vr01 |= VR01_PANEL_FIT_ENABLE;
+ vr40 |= VR40_CLOCK_GATING_ENABLE;
+ x_ratio = (((mode->hdisplay - 1) << 16) /
+ (adjusted_mode->hdisplay - 1)) >> 2;
+ y_ratio = (((mode->vdisplay - 1) << 16) /
+ (adjusted_mode->vdisplay - 1)) >> 2;
+ (void) ivch_write(dvo, VR42, x_ratio);
+ (void) ivch_write(dvo, VR41, y_ratio);
+ } else {
+ vr01 &= ~VR01_PANEL_FIT_ENABLE;
+ vr40 &= ~VR40_CLOCK_GATING_ENABLE;
+ }
+ vr40 &= ~VR40_AUTO_RATIO_ENABLE;
+
+ (void) ivch_write(dvo, VR01, vr01);
+ (void) ivch_write(dvo, VR40, vr40);
+
+ ivch_dump_regs(dvo);
+}
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint16_t val;
+
+ (void) ivch_read(dvo, VR00, &val);
+ DRM_LOG_KMS("VR00: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR01, &val);
+ DRM_LOG_KMS("VR01: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR30, &val);
+ DRM_LOG_KMS("VR30: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR40, &val);
+ DRM_LOG_KMS("VR40: 0x%04x\n", val);
+
+ /* GPIO registers */
+ (void) ivch_read(dvo, VR80, &val);
+ DRM_LOG_KMS("VR80: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR81, &val);
+ DRM_LOG_KMS("VR81: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR82, &val);
+ DRM_LOG_KMS("VR82: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR83, &val);
+ DRM_LOG_KMS("VR83: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR84, &val);
+ DRM_LOG_KMS("VR84: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR85, &val);
+ DRM_LOG_KMS("VR85: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR86, &val);
+ DRM_LOG_KMS("VR86: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR87, &val);
+ DRM_LOG_KMS("VR87: 0x%04x\n", val);
+ (void) ivch_read(dvo, VR88, &val);
+ DRM_LOG_KMS("VR88: 0x%04x\n", val);
+
+ /* Scratch register 0 - AIM Panel type */
+ (void) ivch_read(dvo, VR8E, &val);
+ DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+
+ /* Scratch register 1 - Status register */
+ (void) ivch_read(dvo, VR8F, &val);
+ DRM_LOG_KMS("VR8F: 0x%04x\n", val);
+}
+
+static void ivch_destroy(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ kfree(priv, sizeof(struct ivch_priv));
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ivch_ops = {
+ .init = ivch_init,
+ .dpms = ivch_dpms,
+ .get_hw_state = ivch_get_hw_state,
+ .mode_valid = ivch_mode_valid,
+ .mode_set = ivch_mode_set,
+ .detect = ivch_detect,
+ .dump_regs = ivch_dump_regs,
+ .destroy = ivch_destroy,
+};
diff --git a/usr/src/uts/intel/io/i915/dvo_ns2501.c b/usr/src/uts/intel/io/i915/dvo_ns2501.c
new file mode 100644
index 0000000..7a3ea5c
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/dvo_ns2501.c
@@ -0,0 +1,596 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Copyright (c) 2013, Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+ /* I2CDevRec d; */
+ bool quiet;
+ int reg_8_shadow;
+ int reg_8_set;
+ /* Shadow registers for i915 */
+ int dvoc;
+ int pll_a;
+ int srcdim;
+ int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * For reasons unclear to me, the ns2501 (at least on the Fujitsu/Siemens
+ * laptops) does not respond on the i2c bus unless the PLL is running and
+ * the display is configured in its native resolution.
+ * This function forces the DVO on and saves the i915 registers it
+ * touches; restore_dvo() below puts them back afterwards.
+ *
+ * This is pretty much a hack, but it works: without it, ns2501_readb and
+ * ns2501_writeb fail when switching resolutions.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+ ns->dvoc = I915_READ(DVO_C);
+ ns->pll_a = I915_READ(_DPLL_A);
+ ns->srcdim = I915_READ(DVOC_SRCDIM);
+ ns->fw_blc = I915_READ(FW_BLC);
+
+ I915_WRITE(DVOC, 0x10004084);
+ I915_WRITE(_DPLL_A, 0xd0820000);
+ I915_WRITE(DVOC_SRCDIM, 0x400300); /* 1024x768 */
+ I915_WRITE(FW_BLC, 0x1080304);
+
+ I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ I915_WRITE(DVOC, ns->dvoc);
+ I915_WRITE(_DPLL_A, ns->pll_a);
+ I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+ I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+** Read a register from the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = (u8)addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS
+ ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+ adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/*
+** Write a register to the ns2501.
+** Returns true if successful, false otherwise.
+** If it returns false, it might be wise to enable the
+** DVO with the above function.
+*/
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = (uint8_t)addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1) {
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* National Semiconductor 2501 driver: scan for the chip on the i2c bus.
+ * We rely on the VBIOS having initialized the PLL so we can talk to the
+ * chip; if it hasn't, the chip is never seen and thus not detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the NS2501 chip on the specified i2c bus */
+ struct ns2501_priv *ns;
+ unsigned char ch;
+
+ ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+ if (ns == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ns;
+ ns->quiet = true;
+
+ if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_VID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_DID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ ns->quiet = false;
+ ns->reg_8_set = 0;
+ ns->reg_8_shadow =
+ NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+ DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+ return true;
+
+out:
+ kfree(ns, sizeof(struct ns2501_priv));
+ return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+ /*
+ * This is a laptop display; it doesn't support hotplugging.
+ * Even so, the detection bit of the 2501 is unreliable, as it
+ * only works for some display types.
+ * It is even less reliable because the PLL must be active before
+ * the chip can be read at all.
+ */
+ return connector_status_connected;
+}
+
+static int ns2501_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS
+ ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Currently, these are all the modes I have register data for.
+ * More might exist. It is unclear how to find the panel's native
+ * resolution in here, which would let us always accept it by
+ * disabling the scaler.
+ */
+ if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+ (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+ return MODE_OK;
+ } else {
+ return MODE_ONE_SIZE; /* Is this a reasonable error? */
+ }
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ DRM_DEBUG_KMS
+ ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+ __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
+ mode->vtotal);
+
+ /*
+ * Where do I find the native resolution for which scaling is not
+ * required?
+ *
+ * The retry loop below forces the DVO on after a failed write, as the
+ * chip otherwise does not appear on the i2c bus.
+ */
+ do {
+ ok = true;
+
+ if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+ /* mode 277 */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+ DRM_DEBUG_KMS("%s: switching to 800x600\n",
+ __FUNCTION__);
+
+ /*
+ * No, I do not know where this data comes from.
+ * It is simply what the video BIOS left in the DVO, copied over
+ * verbatim. This also means no modes can be supported other than
+ * the ones the BIOS programmed.
+ */
+ ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
+ ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
+ ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0x27);
+ ok &= ns2501_writeb(dvo, 0x81, 0x03);
+ ok &= ns2501_writeb(dvo, 0x82, 0x41);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ /* mode 274 */
+ DRM_DEBUG_KMS("%s: switching to 640x480\n",
+ __FUNCTION__);
+ /*
+ * No, I do not know where this data comes from.
+ * It is simply what the video BIOS left in the DVO, copied over
+ * verbatim. This also means no modes can be supported other than
+ * the ones the BIOS programmed.
+ */
+ ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+ ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+ ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+ ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+ ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0x34, 0x03);
+ ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+ ok &= ns2501_writeb(dvo, 0x80, 0xff);
+ ok &= ns2501_writeb(dvo, 0x81, 0x07);
+ ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+ ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+ ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+ ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
+ ok &= ns2501_writeb(dvo, 0x91, 0x07);
+ ok &= ns2501_writeb(dvo, 0x94, 0x00);
+ ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+ ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+ ok &= ns2501_writeb(dvo, 0x99, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+ ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+ ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+ ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+ ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+ ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+ ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+ ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
+
+ ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
+ ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+ ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+ ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+ ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+ ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+ ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+ ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+ } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+ /* mode 280 */
+ DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+ __FUNCTION__);
+ /*
+ * This might or might not work, actually. I'm silently
+ * assuming here that the native panel resolution is
+ * 1024x768. If not, this leaves the scaler disabled and
+ * generates a picture that is likely not the expected one.
+ *
+ * The problem is that I do not know where to take the panel
+ * dimensions from.
+ *
+ * Enable the bypass; scaling is not required.
+ *
+ * The scaler registers are irrelevant here.
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ ok &= ns2501_writeb(dvo, 0x37, 0x44);
+ } else {
+ /*
+ * Data not known. Bummer!
+ * Hopefully, the code should not go here
+ * as mode_OK delivered no other modes.
+ */
+ ns->reg_8_shadow |= NS2501_8_BPAS;
+ }
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+ /*
+ * Restore the old i915 registers before
+ * forcing the ns2501 on.
+ */
+ if (restore)
+ restore_dvo(dvo);
+}
+
+/* query the NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+ return false;
+
+ if (ch & NS2501_8_PD)
+ return true;
+ else
+ return false;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ bool ok;
+ bool restore = false;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ unsigned char ch;
+
+ DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
+ __FUNCTION__, enable);
+
+ ch = ns->reg_8_shadow;
+
+ if (enable)
+ ch |= NS2501_8_PD;
+ else
+ ch &= ~NS2501_8_PD;
+
+ if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+ ns->reg_8_set = 1;
+ ns->reg_8_shadow = ch;
+
+ do {
+ ok = true;
+ ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+ ok &=
+ ns2501_writeb(dvo, 0x34,
+ enable ? 0x03 : 0x00);
+ ok &=
+ ns2501_writeb(dvo, 0x35,
+ enable ? 0xff : 0x00);
+ if (!ok) {
+ if (restore)
+ restore_dvo(dvo);
+ enable_dvo(dvo);
+ restore = true;
+ }
+ } while (!ok);
+
+ if (restore)
+ restore_dvo(dvo);
+ }
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ (void) ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+ DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+ (void) ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+ DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+ (void) ns2501_readb(dvo, NS2501_REG8, &val);
+ DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+ (void) ns2501_readb(dvo, NS2501_REG9, &val);
+ DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+ (void) ns2501_readb(dvo, NS2501_REGC, &val);
+ DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+
+ if (ns) {
+ kfree(ns, sizeof(struct ns2501_priv));
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+ .init = ns2501_init,
+ .detect = ns2501_detect,
+ .mode_valid = ns2501_mode_valid,
+ .mode_set = ns2501_mode_set,
+ .dpms = ns2501_dpms,
+ .get_hw_state = ns2501_get_hw_state,
+ .dump_regs = ns2501_dump_regs,
+ .destroy = ns2501_destroy,
+};
diff --git a/usr/src/uts/intel/io/i915/dvo_sil164.c b/usr/src/uts/intel/io/i915/dvo_sil164.c
new file mode 100644
index 0000000..c9c7c08
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/dvo_sil164.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+/*
+ * Copyright (c) 2012, 2013, Intel Corporation. All rights reserved.
+ */
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define SIL164_VID 0x0001
+#define SIL164_DID 0x0006
+
+#define SIL164_VID_LO 0x00
+#define SIL164_VID_HI 0x01
+#define SIL164_DID_LO 0x02
+#define SIL164_DID_HI 0x03
+#define SIL164_REV 0x04
+#define SIL164_RSVD 0x05
+#define SIL164_FREQ_LO 0x06
+#define SIL164_FREQ_HI 0x07
+
+#define SIL164_REG8 0x08
+#define SIL164_8_VEN (1<<5)
+#define SIL164_8_HEN (1<<4)
+#define SIL164_8_DSEL (1<<3)
+#define SIL164_8_BSEL (1<<2)
+#define SIL164_8_EDGE (1<<1)
+#define SIL164_8_PD (1<<0)
+
+#define SIL164_REG9 0x09
+#define SIL164_9_VLOW (1<<7)
+#define SIL164_9_MSEL_MASK (0x7<<4)
+#define SIL164_9_TSEL (1<<3)
+#define SIL164_9_RSEN (1<<2)
+#define SIL164_9_HTPLG (1<<1)
+#define SIL164_9_MDI (1<<0)
+
+#define SIL164_REGC 0x0c
+
+struct sil164_priv {
+ /* I2CDevRec d; */
+ bool quiet;
+};
+
+#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
+
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = (u8) addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+}
+
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = (uint8_t) addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* Silicon Image 164 driver for chip on i2c bus */
+static bool sil164_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the SIL164 chip on the specified i2c bus */
+ struct sil164_priv *sil;
+ unsigned char ch;
+
+ sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
+ if (sil == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = sil;
+ sil->quiet = true;
+
+ if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_VID & 0xff)) {
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_DID & 0xff)) {
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ sil->quiet = false;
+
+ DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
+ return true;
+
+out:
+ kfree(sil, sizeof(struct sil164_priv));
+ return false;
+}
+
+static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
+{
+ uint8_t reg9;
+
+ (void) sil164_readb(dvo, SIL164_REG9, &reg9);
+
+ if (reg9 & SIL164_9_HTPLG)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+/* LINTED */
+static int sil164_mode_valid(struct intel_dvo_device *dvo,
+ /* LINTED */
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+/* LINTED */
+static void sil164_mode_set(struct intel_dvo_device *dvo,
+ /* LINTED */
+ struct drm_display_mode *mode,
+ /* LINTED */
+ struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock
+ * dependencies in the mode setup, we can just leave the
+ * registers alone and everything will work fine.
+ */
+ /* Recommended programming sequence from the doc:
+ *	sil164_writeb(sil, 0x08, 0x30);
+ *	sil164_writeb(sil, 0x09, 0x00);
+ *	sil164_writeb(sil, 0x0a, 0x90);
+ *	sil164_writeb(sil, 0x0c, 0x89);
+ *	sil164_writeb(sil, 0x08, 0x31);
+ */
+ /* don't do much */
+ return;
+}
+
+/* set the SIL164 power state */
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (!ret)
+ return;
+
+ if (enable)
+ ch |= SIL164_8_PD;
+ else
+ ch &= ~SIL164_8_PD;
+
+ (void) sil164_writeb(dvo, SIL164_REG8, ch);
+ return;
+}
+
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (!ret)
+ return false;
+
+ if (ch & SIL164_8_PD)
+ return true;
+ else
+ return false;
+}
+
+static void sil164_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ (void) sil164_readb(dvo, SIL164_FREQ_LO, &val);
+ DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+ (void) sil164_readb(dvo, SIL164_FREQ_HI, &val);
+ DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+ (void) sil164_readb(dvo, SIL164_REG8, &val);
+ DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+ (void) sil164_readb(dvo, SIL164_REG9, &val);
+ DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+ (void) sil164_readb(dvo, SIL164_REGC, &val);
+ DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
+}
+
+static void sil164_destroy(struct intel_dvo_device *dvo)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+
+ if (sil) {
+ kfree(sil, sizeof (struct sil164_priv));
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops sil164_ops = {
+ .init = sil164_init,
+ .detect = sil164_detect,
+ .mode_valid = sil164_mode_valid,
+ .mode_set = sil164_mode_set,
+ .dpms = sil164_dpms,
+ .get_hw_state = sil164_get_hw_state,
+ .dump_regs = sil164_dump_regs,
+ .destroy = sil164_destroy,
+};
diff --git a/usr/src/uts/intel/io/i915/dvo_tfp410.c b/usr/src/uts/intel/io/i915/dvo_tfp410.c
new file mode 100644
index 0000000..407404e
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/dvo_tfp410.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013, Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+#include "dvo.h"
+
+/* register definitions according to the TFP410 data sheet */
+#define TFP410_VID 0x014C
+#define TFP410_DID 0x0410
+
+#define TFP410_VID_LO 0x00
+#define TFP410_VID_HI 0x01
+#define TFP410_DID_LO 0x02
+#define TFP410_DID_HI 0x03
+#define TFP410_REV 0x04
+
+#define TFP410_CTL_1 0x08
+#define TFP410_CTL_1_TDIS (1<<6)
+#define TFP410_CTL_1_VEN (1<<5)
+#define TFP410_CTL_1_HEN (1<<4)
+#define TFP410_CTL_1_DSEL (1<<3)
+#define TFP410_CTL_1_BSEL (1<<2)
+#define TFP410_CTL_1_EDGE (1<<1)
+#define TFP410_CTL_1_PD (1<<0)
+
+#define TFP410_CTL_2 0x09
+#define TFP410_CTL_2_VLOW (1<<7)
+#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
+#define TFP410_CTL_2_MSEL (1<<4)
+#define TFP410_CTL_2_TSEL (1<<3)
+#define TFP410_CTL_2_RSEN (1<<2)
+#define TFP410_CTL_2_HTPLG (1<<1)
+#define TFP410_CTL_2_MDI (1<<0)
+
+#define TFP410_CTL_3 0x0A
+#define TFP410_CTL_3_DK_MASK (0x7<<5)
+#define TFP410_CTL_3_DK (1<<5)
+#define TFP410_CTL_3_DKEN (1<<4)
+#define TFP410_CTL_3_CTL_MASK (0x7<<1)
+#define TFP410_CTL_3_CTL (1<<1)
+
+#define TFP410_USERCFG 0x0B
+
+#define TFP410_DE_DLY 0x32
+
+#define TFP410_DE_CTL 0x33
+#define TFP410_DE_CTL_DEGEN (1<<6)
+#define TFP410_DE_CTL_VSPOL (1<<5)
+#define TFP410_DE_CTL_HSPOL (1<<4)
+#define TFP410_DE_CTL_DEDLY8 (1<<0)
+
+#define TFP410_DE_TOP 0x34
+
+#define TFP410_DE_CNT_LO 0x36
+#define TFP410_DE_CNT_HI 0x37
+
+#define TFP410_DE_LIN_LO 0x38
+#define TFP410_DE_LIN_HI 0x39
+
+#define TFP410_H_RES_LO 0x3A
+#define TFP410_H_RES_HI 0x3B
+
+#define TFP410_V_RES_LO 0x3C
+#define TFP410_V_RES_HI 0x3D
+
+struct tfp410_priv {
+ bool quiet;
+};
+
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = (u8) addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+}
+
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = (uint8_t) addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
+{
+ uint8_t ch1, ch2;
+
+ if (tfp410_readb(dvo, addr+0, &ch1) &&
+ tfp410_readb(dvo, addr+1, &ch2))
+ return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
+
+ return -1;
+}
+
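+/* For example, tfp410_getid(dvo, TFP410_VID_LO) assembles VID_HI:VID_LO
+ * into the 16-bit vendor ID (0x014C for a TFP410), or returns -1 if
+ * either i2c read fails. */
+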
+/* Ti TFP410 driver for chip on i2c bus */
+static bool tfp410_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the tfp410 chip on the specified i2c bus */
+ struct tfp410_priv *tfp;
+ int id;
+
+ tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
+ if (tfp == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = tfp;
+ tfp->quiet = true;
+
+ if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+ DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+ "Slave %d.\n",
+ id, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+ DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+ "Slave %d.\n",
+ id, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ tfp->quiet = false;
+ return true;
+out:
+ kfree(tfp, sizeof(struct tfp410_priv));
+ return false;
+}
+
+static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
+{
+ enum drm_connector_status ret = connector_status_disconnected;
+ uint8_t ctl2;
+
+ if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
+ if (ctl2 & TFP410_CTL_2_RSEN)
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+ }
+
+ return ret;
+}
+
+/* LINTED */
+static int tfp410_mode_valid(struct intel_dvo_device *dvo,
+ /* LINTED */
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+/* LINTED */
+static void tfp410_mode_set(struct intel_dvo_device *dvo,
+ /* LINTED */
+ struct drm_display_mode *mode,
+ /* LINTED */
+ struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock dependencies
+ * in the mode setup, we can just leave the registers alone and everything
+ * will work fine.
+ */
+ /* don't do much */
+ return;
+}
+
+/* set the tfp410 power state */
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return;
+
+ if (enable)
+ ctl1 |= TFP410_CTL_1_PD;
+ else
+ ctl1 &= ~TFP410_CTL_1_PD;
+
+ (void) tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
+}
+
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return false;
+
+ if (ctl1 & TFP410_CTL_1_PD)
+ return true;
+ else
+ return false;
+}
+
+static void tfp410_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val, val2;
+
+ (void) tfp410_readb(dvo, TFP410_REV, &val);
+ DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+ (void) tfp410_readb(dvo, TFP410_CTL_1, &val);
+ DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+ (void) tfp410_readb(dvo, TFP410_CTL_2, &val);
+ DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+ (void) tfp410_readb(dvo, TFP410_CTL_3, &val);
+ DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+ (void) tfp410_readb(dvo, TFP410_USERCFG, &val);
+ DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+ (void) tfp410_readb(dvo, TFP410_DE_DLY, &val);
+ DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+ (void) tfp410_readb(dvo, TFP410_DE_CTL, &val);
+ DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+ (void) tfp410_readb(dvo, TFP410_DE_TOP, &val);
+ DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+ (void) tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+ (void) tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+ DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ (void) tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+ (void) tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+ DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ (void) tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+ (void) tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+ DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ (void) tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+ (void) tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+ DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+}
+
+static void tfp410_destroy(struct intel_dvo_device *dvo)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+
+ if (tfp) {
+ kfree(tfp, sizeof(struct tfp410_priv));
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops tfp410_ops = {
+ .init = tfp410_init,
+ .detect = tfp410_detect,
+ .mode_valid = tfp410_mode_valid,
+ .mode_set = tfp410_mode_set,
+ .dpms = tfp410_dpms,
+ .get_hw_state = tfp410_get_hw_state,
+ .dump_regs = tfp410_dump_regs,
+ .destroy = tfp410_destroy,
+};
diff --git a/usr/src/uts/intel/io/i915/i915_dma.c b/usr/src/uts/intel/io/i915/i915_dma.c
new file mode 100644
index 0000000..b3b3e0c
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_dma.c
@@ -0,0 +1,1823 @@
+/*
+ * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "drm_linux.h"
+#include "drm_mm.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_io32.h"
+#include <sys/agp/agptarget_io.h>
+
+#define USE_PCI_DMA_API 0
+
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
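+
+/*
+ * Typical legacy emit sequence (a sketch; MI_NOOP is defined in
+ * i915_reg.h, and BEGIN_LP_RING() returns 0 on success):
+ *
+ *	if (BEGIN_LP_RING(2) == 0) {
+ *		OUT_RING(MI_NOOP);
+ *		OUT_RING(MI_NOOP);
+ *		ADVANCE_LP_RING();
+ *	}
+ */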
+
+/**
+ * Lock test used when the lock is only needed to synchronize ring access.
+ *
+ * In that case we don't need it once GEM is initialized, as nobody else
+ * then has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
+} while (__lintzero)
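+/* __lintzero is presumably an always-zero variable serving the same
+ * purpose as the 0 in a conventional do { ... } while (0) macro while
+ * keeping lint quiet about a constant in the condition. */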
+
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
+{
+ if (I915_NEED_GFX_HWS(dev_priv->dev)) {
+ u32 *regs = (u32 *)dev_priv->dri1.gfx_hws_cpu_addr.handle;
+ return regs[reg];
+ } else
+ return intel_read_status_page(LP_RING(dev_priv), reg);
+}
+
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_BREADCRUMB_INDEX 0x21
+
+void i915_update_dri1_breadcrumb(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+}
+
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 addr;
+
+ addr = dev_priv->status_page_dmah->paddr;
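+ /*
+ * Explanatory note: on gen4+ bits 32-35 of the physical address are
+ * folded into bits 4-7 of HWS_PGA ((paddr >> 28) & 0xf0 extracts exactly
+ * those bits), so a status page located above 4GB remains addressable.
+ */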
+ if (INTEL_INFO(dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->paddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+}
+
+/**
+ * Frees the hardware status page, whether it's a physical address or a virtual
+ * address set up by the X Server.
+ */
+static void i915_free_hws(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ }
+
+ if (ring->status_page.gfx_addr) {
+ ring->status_page.gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->dri1.gfx_hws_cpu_addr, dev);
+ }
+
+ /* Need to rewrite hardware status page */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
+void i915_kernel_lost_context(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ /*
+ * We should never lose context on the ring with modesetting
+ * as we don't expose it to userspace
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
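+ /*
+ * Explanatory note: free space is the gap from tail back around to head,
+ * minus the I915_RING_FREE_SPACE reserve; when head is behind tail the
+ * subtraction goes negative and adding ring->size corrects the wrap.
+ */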
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
+ if (ring->space < 0)
+ ring->space += ring->size;
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (ring->head == ring->tail && master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+}
+
+static int i915_dma_cleanup(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ /* Make sure interrupts are disabled here because the uninstall ioctl
+ * may not have been called from userspace and after dev_private
+ * is freed, it's too late.
+ */
+ if (dev->irq_enabled)
+ (void) drm_irq_uninstall(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Clear the HWS virtual address at teardown */
+ if (I915_NEED_GFX_HWS(dev))
+ i915_free_hws(dev);
+
+ return 0;
+}
+
+static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret;
+
+ master_priv->sarea = drm_getsarea(dev);
+ if (master_priv->sarea) {
+ master_priv->sarea_priv = (drm_i915_sarea_t *)(uintptr_t)
+ ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
+ } else {
+ DRM_DEBUG_DRIVER("sarea not found, assuming DRI2 userspace\n");
+ }
+
+ if (init->ring_size != 0) {
+ if (LP_RING(dev_priv)->obj != NULL) {
+ (void) i915_dma_cleanup(dev);
+ DRM_ERROR("Client tried to initialize ringbuffer in "
+ "GEM mode\n");
+ return -EINVAL;
+ }
+
+ ret = intel_render_ring_init_dri(dev,
+ init->ring_start,
+ init->ring_size);
+ if (ret) {
+ (void) i915_dma_cleanup(dev);
+ return ret;
+ }
+ }
+
+ dev_priv->dri1.cpp = init->cpp;
+ dev_priv->dri1.back_offset = init->back_offset;
+ dev_priv->dri1.front_offset = init->front_offset;
+ dev_priv->dri1.current_page = 0;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->pf_current_page = 0;
+
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+ dev_priv->dri1.allow_batchbuffer = 1;
+
+ return 0;
+}
+
+static int i915_dma_resume(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG_DRIVER("%s\n", __func__);
+
+ if (ring->virtual_start == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ /* Program Hardware Status Page */
+ if (!ring->status_page.page_addr) {
+ DRM_ERROR("Can not find hardware status page\n");
+ return -EINVAL;
+ }
+ DRM_DEBUG_DRIVER("hw status page @ %p\n",
+ ring->status_page.page_addr);
+ if (ring->status_page.gfx_addr != 0)
+ intel_ring_setup_status_page(ring);
+ else
+ i915_write_hws_pga(dev);
+ DRM_DEBUG_DRIVER("Enabled hardware status page\n");
+
+ return 0;
+}
+/* LINTED */
+static int i915_dma_init(DRM_IOCTL_ARGS)
+{
+ drm_i915_init_t *init = data;
+ int retcode = 0;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ switch (init->func) {
+ case I915_INIT_DMA:
+ retcode = i915_initialize(dev, init);
+ break;
+ case I915_CLEANUP_DMA:
+ retcode = i915_dma_cleanup(dev);
+ break;
+ case I915_RESUME_DMA:
+ retcode = i915_dma_resume(dev);
+ break;
+ default:
+ retcode = -EINVAL;
+ break;
+ }
+
+ return retcode;
+}
+
+/* Implement basically the same security restrictions as hardware does
+ * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
+ *
+ * Most of the calculations below involve calculating the size of a
+ * particular instruction. It's important to get the size right as
+ * that tells us where the next instruction to check is. Any illegal
+ * instruction detected will be given a size of zero, which is a
+ * signal to abort the rest of the buffer.
+ */
+static int validate_cmd(int cmd)
+{
+ switch (((cmd >> 29) & 0x7)) {
+ case 0x0:
+ switch ((cmd >> 23) & 0x3f) {
+ case 0x0:
+ return 1; /* MI_NOOP */
+ case 0x4:
+ return 1; /* MI_FLUSH */
+ default:
+ return 0; /* disallow everything else */
+ }
+#ifndef __SUNPRO_C
+ break;
+#endif
+ case 0x1:
+ return 0; /* reserved */
+ case 0x2:
+ return (cmd & 0xff) + 2; /* 2d commands */
+ case 0x3:
+ if (((cmd >> 24) & 0x1f) <= 0x18)
+ return 1;
+
+ switch ((cmd >> 24) & 0x1f) {
+ case 0x1c:
+ return 1;
+ case 0x1d:
+ switch ((cmd >> 16) & 0xff) {
+ case 0x3:
+ return (cmd & 0x1f) + 2;
+ case 0x4:
+ return (cmd & 0xf) + 2;
+ default:
+ return (cmd & 0xffff) + 2;
+ }
+ case 0x1e:
+ if (cmd & (1 << 23))
+ return (cmd & 0xffff) + 1;
+ else
+ return 1;
+ case 0x1f:
+ if ((cmd & (1 << 23)) == 0) /* inline vertices */
+ return (cmd & 0x1ffff) + 2;
+ else if (cmd & (1 << 17)) /* indirect random */
+ if ((cmd & 0xffff) == 0)
+ return 0; /* unknown length, too hard */
+ else
+ return (((cmd & 0xffff) + 1) / 2) + 1;
+ else
+ return 2; /* indirect sequential */
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+
+#ifndef __SUNPRO_C
+ return 0;
+#endif
+}
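+/*
+ * A worked example with illustrative values (not from the original source):
+ * cmd == 0x00000000 decodes as MI_NOOP, length 1; cmd == 0x40000000 has
+ * ((cmd >> 29) & 0x7) == 0x2, a 2D command of length (cmd & 0xff) + 2 == 2
+ * dwords. Any word that decodes to length 0 aborts validation of the whole
+ * buffer in i915_emit_cmds() below.
+ */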
+
+static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i, ret;
+
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+ return -EINVAL;
+
+ for (i = 0; i < dwords;) {
+ int sz = validate_cmd(buffer[i]);
+ if (sz == 0 || i + sz > dwords)
+ return -EINVAL;
+ i += sz;
+ }
+
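+ /*
+ * Reserve an even number of dwords and pad with a trailing 0 so the
+ * ring tail stays qword aligned (an explanatory note, not upstream text).
+ */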
+ ret = BEGIN_LP_RING((dwords+1)&~1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ OUT_RING(buffer[i]);
+ if (dwords & 1)
+ OUT_RING(0);
+
+ ADVANCE_LP_RING();
+
+ return 0;
+}
+
+int
+i915_emit_box(struct drm_device *dev,
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ (unsigned) box->y2 <= 0 || (unsigned) box->x2 <= 0) {
+ DRM_ERROR("Bad box %d,%d..%d,%d\n",
+ box->x1, box->y1, box->x2, box->y2);
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
+ OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
+ OUT_RING(DR4);
+ } else {
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ return ret;
+
+ OUT_RING(GFX_OP_DRAWRECT_INFO);
+ OUT_RING(DR1);
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
+ OUT_RING(DR4);
+ OUT_RING(0);
+ }
+ ADVANCE_LP_RING();
+
+ return 0;
+}
+
+/* XXX: Emitting the counter should really be moved to part of the IRQ
+ * emit. For now, do it in both places:
+ */
+
+static void i915_emit_breadcrumb(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 0;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
+}
+
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+ drm_i915_cmdbuffer_t *cmd,
+ struct drm_clip_rect *cliprects,
+ void *cmdbuf)
+{
+ int nbox = cmd->num_cliprects;
+ int i = 0, count, ret;
+
+ if (cmd->sz & 0x3) {
+ DRM_ERROR("alignment");
+ return -EINVAL;
+ }
+
+ i915_kernel_lost_context(dev);
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ cmd->DR1, cmd->DR4);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
+ if (ret)
+ return ret;
+ }
+
+ i915_emit_breadcrumb(dev);
+ return 0;
+}
+
+static int i915_dispatch_batchbuffer(struct drm_device * dev,
+ drm_i915_batchbuffer_t * batch,
+ struct drm_clip_rect *cliprects)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int nbox = batch->num_cliprects;
+ int i, count, ret;
+
+ if ((batch->start | batch->used) & 0x7) {
+ DRM_ERROR("alignment");
+ return -EINVAL;
+ }
+
+ i915_kernel_lost_context(dev);
+
+ count = nbox ? nbox : 1;
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ batch->DR1, batch->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (!IS_I830(dev) && !IS_845G(dev)) {
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ return ret;
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ OUT_RING(batch->start);
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ }
+ } else {
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ OUT_RING(batch->start + batch->used - 4);
+ OUT_RING(0);
+ }
+ ADVANCE_LP_RING();
+ }
+
+
+ if (IS_G4X(dev) || IS_GEN5(dev)) {
+ if (BEGIN_LP_RING(2) == 0) {
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
+ }
+
+ i915_emit_breadcrumb(dev);
+ return 0;
+}
+
+static int i915_dispatch_flip(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv =
+ dev->primary->master->driver_priv;
+ int ret;
+
+ if (!master_priv->sarea_priv)
+ return -EINVAL;
+
+ DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
+ __func__,
+ dev_priv->dri1.current_page,
+ master_priv->sarea_priv->pf_current_page);
+
+ i915_kernel_lost_context(dev);
+
+ ret = BEGIN_LP_RING(10);
+ if (ret)
+ return ret;
+
+ OUT_RING(MI_FLUSH | MI_READ_FLUSH);
+ OUT_RING(0);
+
+ OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
+ OUT_RING(0);
+ if (dev_priv->dri1.current_page == 0) {
+ OUT_RING(dev_priv->dri1.back_offset);
+ dev_priv->dri1.current_page = 1;
+ } else {
+ OUT_RING(dev_priv->dri1.front_offset);
+ dev_priv->dri1.current_page = 0;
+ }
+ OUT_RING(0);
+
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
+ OUT_RING(0);
+
+ ADVANCE_LP_RING();
+
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
+
+ master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
+ return 0;
+}
+
+static int i915_quiescent(struct drm_device * dev)
+{
+ i915_kernel_lost_context(dev);
+ return intel_ring_idle(LP_RING(dev->dev_private));
+}
+
+/* LINTED */
+static int i915_flush_ioctl(DRM_IOCTL_ARGS)
+{
+ int ret;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file);
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_quiescent(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/* LINTED */
+static int i915_batchbuffer(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+ master_priv->sarea_priv;
+ drm_i915_batchbuffer_t *batch = data;
+ int ret;
+ struct drm_clip_rect *cliprects = NULL;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv->dri1.allow_batchbuffer) {
+ DRM_ERROR("Batchbuffer ioctl disabled\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
+ batch->start, batch->used, batch->num_cliprects);
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file);
+
+ if (batch->num_cliprects < 0)
+ return -EINVAL;
+
+ if (batch->num_cliprects) {
+ cliprects = kcalloc(batch->num_cliprects,
+ sizeof(struct drm_clip_rect),
+ GFP_KERNEL);
+ if (cliprects == NULL)
+ return -ENOMEM;
+
+ ret = copy_from_user(cliprects, batch->cliprects,
+ batch->num_cliprects *
+ sizeof(struct drm_clip_rect));
+ if (ret != 0) {
+ ret = -EFAULT;
+ goto fail_free;
+ }
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (sarea_priv)
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_free:
+ kfree(cliprects, batch->num_cliprects * sizeof(struct drm_clip_rect));
+
+ return ret;
+}
+
+/* LINTED */
+static int i915_cmdbuffer(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+ master_priv->sarea_priv;
+ drm_i915_cmdbuffer_t *cmdbuf = data;
+ struct drm_clip_rect *cliprects = NULL;
+ void *batch_data;
+ int ret;
+
+ DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+ (void *)cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file);
+
+ if (cmdbuf->num_cliprects < 0)
+ return -EINVAL;
+
+ batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
+ if (batch_data == NULL)
+ return -ENOMEM;
+
+ ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+ if (ret != 0) {
+ ret = -EFAULT;
+ goto fail_batch_free;
+ }
+
+ if (cmdbuf->num_cliprects) {
+ cliprects = kcalloc(cmdbuf->num_cliprects,
+ sizeof(struct drm_clip_rect), GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto fail_batch_free;
+ }
+
+ ret = copy_from_user(cliprects, cmdbuf->cliprects,
+ cmdbuf->num_cliprects *
+ sizeof(struct drm_clip_rect));
+ if (ret != 0) {
+ ret = -EFAULT;
+ goto fail_clip_free;
+ }
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+ goto fail_clip_free;
+ }
+
+ if (sarea_priv)
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_clip_free:
+ kfree(cliprects, cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
+fail_batch_free:
+ kfree(batch_data, cmdbuf->sz);
+
+ return ret;
+}
+
+static int i915_emit_irq(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+ i915_kernel_lost_context(dev);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_priv->dri1.counter++;
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+ dev_priv->dri1.counter = 1;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->dri1.counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+
+ return dev_priv->dri1.counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret = 0;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ READ_BREADCRUMB(dev_priv));
+
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+ }
+
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, &ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
+
+ if (ret == -EBUSY) {
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
+ }
+
+ return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+/* LINTED */
+int i915_irq_emit(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_emit_t *emit = data;
+ int result;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file);
+
+ mutex_lock(&dev->struct_mutex);
+ result = i915_emit_irq(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+/* LINTED */
+int i915_irq_wait(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_wait_t *irqwait = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+/* LINTED */
+int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+}
+
+/**
+ * Schedule a buffer swap at a given vertical blank.
+ */
+/* LINTED */
+int i915_vblank_swap(DRM_IOCTL_ARGS)
+{
+ /* The delayed swap mechanism was fundamentally racy, and has been
+ * removed. The model was that the client requested a delayed flip/swap
+ * from the kernel, then waited for vblank before continuing to perform
+ * rendering. The problem was that the kernel might wake the client
+ * up before it dispatched the vblank swap (since the lock has to be
+ * held while touching the ringbuffer), in which case the client would
+ * clear and start the next frame before the swap occurred, and
+ * flicker would occur in addition to likely missing the vblank.
+ *
+ * In the absence of this ioctl, userland falls back to a correct path
+ * of waiting for a vblank, then dispatching the swap on its own.
+ * Context switching to userland and back is plenty fast enough for
+ * meeting the requirements of vblank swapping.
+ */
+ return -EINVAL;
+}
+
+/* LINTED */
+static int i915_flip_bufs(DRM_IOCTL_ARGS)
+{
+ int ret;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ DRM_DEBUG_DRIVER("%s\n", __func__);
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file);
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_dispatch_flip(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/* LINTED */
+static int i915_getparam(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_getparam_t *param = data;
+ int value;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+ value = dev->pdev->irq ? 1 : 0;
+ break;
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
+ break;
+ case I915_PARAM_LAST_DISPATCH:
+ value = READ_BREADCRUMB(dev_priv);
+ break;
+ case I915_PARAM_CHIPSET_ID:
+ value = dev->pci_device;
+ break;
+ case I915_PARAM_HAS_GEM:
+ value = 1;
+ break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+ break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = dev_priv->overlay ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXECBUF2:
+ /* depends on GEM */
+ value = 1;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = intel_ring_initialized(&dev_priv->ring[VCS]);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = intel_ring_initialized(&dev_priv->ring[BCS]);
+ break;
+ case I915_PARAM_HAS_VEBOX:
+ value = intel_ring_initialized(&dev_priv->ring[VECS]);
+ break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev);
+ break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev);
+ break;
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ /* not supported yet */
+ value = 0;
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ value = 1;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown parameter %d\n",
+ param->param);
+ return -EINVAL;
+ }
+
+ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+ DRM_ERROR("DRM_COPY_TO_USER failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
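+/*
+ * Illustrative userspace usage (a hypothetical sketch, not part of this
+ * driver): a client queries one of the parameters above roughly as
+ *
+ *	drm_i915_getparam_t gp;
+ *	int has_bsd = 0;
+ *
+ *	gp.param = I915_PARAM_HAS_BSD;
+ *	gp.value = &has_bsd;
+ *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_bsd)
+ *		... the BSD (video) ring can be used ...
+ */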
+
+/* LINTED */
+static int i915_setparam(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_setparam_t *param = data;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ switch (param->param) {
+ case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+ break;
+ case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+ break;
+ case I915_SETPARAM_ALLOW_BATCHBUFFER:
+ dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
+ break;
+ case I915_SETPARAM_NUM_USED_FENCES:
+ if (param->value > dev_priv->num_fence_regs ||
+ param->value < 0)
+ return -EINVAL;
+ /* Userspace can use first N regs */
+ dev_priv->fence_reg_start = param->value;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown parameter %d\n",
+ param->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* LINTED */
+static int i915_set_status_page(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_hws_addr_t *hws = data;
+ struct intel_ring_buffer *ring;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!I915_NEED_GFX_HWS(dev))
+ return -EINVAL;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ DRM_ERROR("tried to set status page when mode setting active\n");
+ return 0;
+ }
+
+ DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
+
+ ring = LP_RING(dev_priv);
+ ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
+
+ dev_priv->dri1.gfx_hws_cpu_addr.offset = (u_offset_t)dev->agp_aperbase + hws->addr;
+ dev_priv->dri1.gfx_hws_cpu_addr.size = 4*1024;
+ dev_priv->dri1.gfx_hws_cpu_addr.type = 0;
+ dev_priv->dri1.gfx_hws_cpu_addr.flags = 0;
+ dev_priv->dri1.gfx_hws_cpu_addr.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->dri1.gfx_hws_cpu_addr, dev);
+ if (dev_priv->dri1.gfx_hws_cpu_addr.handle == NULL) {
+ (void) i915_dma_cleanup(dev);
+ ring->status_page.gfx_addr = 0;
+ DRM_ERROR("can not ioremap virtual address for"
+ " G33 hw status page\n");
+ return -ENOMEM;
+ }
+
+ (void) memset(dev_priv->dri1.gfx_hws_cpu_addr.handle, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+
+ DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
+ ring->status_page.gfx_addr);
+ DRM_DEBUG_DRIVER("load hws at %p\n",
+ ring->status_page.page_addr);
+ return 0;
+}
+
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* OSOL_i915 Begin */
+ struct drm_i915_bridge_dev *bridge_dev = &dev_priv->bridge_dev;
+ char name[32];
+ int i, err;
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ return 0;
+
+ if (bridge_dev->ldi_id) {
+ DRM_DEBUG("end");
+ return 0;
+ }
+
+ if (ldi_ident_from_dip(dev->devinfo, &bridge_dev->ldi_id)) {
+ bridge_dev->ldi_id = NULL;
+ DRM_DEBUG("failed");
+ return -1;
+ }
+
+ /* Workaround here:
+ * agptarget0 is not always linked to the right device,
+ * so try successive agptarget minors until one opens.
+ */
+ for (i = 0; i < 16; i++) {
+ (void) sprintf(name, "/dev/agp/agptarget%d", i);
+ err = ldi_open_by_name(name, 0, kcred,
+ &bridge_dev->bridge_dev_hdl, bridge_dev->ldi_id);
+ if (err == 0) {
+ break;
+ }
+ DRM_INFO("can't open agptarget%d", i);
+ }
+
+ if (err) {
+ ldi_ident_release(bridge_dev->ldi_id);
+ bridge_dev->ldi_id = NULL;
+ bridge_dev->bridge_dev_hdl = NULL;
+ return -1;
+ }
+ /* OSOL_i915 End */
+
+ return 0;
+}
+
+int i915_bridge_dev_read_config_word(struct drm_i915_bridge_dev *bridge_dev, int where, u16 *val)
+{
+ u16 data = (u16)where;
+
+ if (ldi_ioctl(bridge_dev->bridge_dev_hdl,
+ AGP_TARGET_PCICONFIG_GET16, (intptr_t)&data, FKIOCTL, kcred, 0))
+ return -1;
+
+ *val = data;
+ return 0;
+}
+
+int i915_bridge_dev_write_config_word(struct drm_i915_bridge_dev *bridge_dev, int where, u16 val)
+{
+ u32 data = (u16)where << 16 | val;
+
+ if (ldi_ioctl(bridge_dev->bridge_dev_hdl,
+ AGP_TARGET_PCICONFIG_SET16, (intptr_t)&data, FKIOCTL, kcred, 0))
+ return -1;
+
+ return 0;
+}
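+/*
+ * Illustrative usage (a sketch; the 0x52 offset is a hypothetical example):
+ * the agptarget ioctl ABI packs the config offset in the low word for a
+ * 16-bit read, and offset<<16 | value for a 16-bit write:
+ *
+ *	u16 val;
+ *
+ *	if (i915_bridge_dev_read_config_word(&dev_priv->bridge_dev,
+ *	    0x52, &val) == 0)
+ *		DRM_DEBUG("bridge cfg[0x52] = 0x%x", val);
+ */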
+
+/* true = enable decode, false = disable decoder */
+/* LINTED */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+ struct drm_device *dev = cookie;
+
+ (void) intel_modeset_vga_set_state(dev, state);
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_parse_bios(dev);
+ if (ret)
+ DRM_INFO("failed to find VBIOS tables\n");
+
+ /* Initialise stolen first so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ ret = i915_gem_init_stolen(dev);
+ if (ret)
+ goto out;
+
+ /* clear interrupt related bits */
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_gem_stolen;
+
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
+ intel_modeset_init(dev);
+
+ ret = i915_gem_init(dev);
+ if (ret)
+ goto cleanup_irq;
+
+
+ intel_modeset_gem_init(dev);
+
+ /* Always safe in the mode setting case. */
+ /* FIXME: do pre/post-mode set stuff in core KMS code */
+ dev->vblank_disable_allowed = 1;
+ if (INTEL_INFO(dev)->num_pipes == 0) {
+ dev_priv->mm.suspended = 0;
+ return 0;
+ }
+
+ if (dev_priv->fbcon_obj != NULL) {
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_gem;
+
+ drm_register_fbops(dev);
+ }
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(dev);
+
+ /*
+ * Some ports require correctly set-up hpd registers for detection to
+ * work properly (leading to ghost connected connector status), e.g. VGA
+ * on gm45. Hence we can only set up the initial fbdev config after hpd
+ * irqs are fully enabled. Now we should scan for the initial config
+ * only once hotplug handling is enabled, but due to screwed-up locking
+ * around kms/fbdev init we can't protect the fbdev initial config
+ * scanning against hotplug events. Hence do this first and ignore the
+ * tiny window where we will lose hotplug notifications.
+ */
+ if (dev_priv->fbcon_obj != NULL)
+ intel_fbdev_initial_config(dev);
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ dev_priv->enable_hotplug_processing = true;
+
+ drm_kms_helper_poll_init(dev);
+
+ /* We're off and running w/KMS */
+ dev_priv->mm.suspended = 0;
+
+ return 0;
+
+cleanup_gem:
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ drm_mm_takedown(&dev_priv->mm.gtt_space);
+cleanup_irq:
+ drm_irq_uninstall(dev);
+cleanup_gem_stolen:
+ i915_gem_cleanup_stolen(dev);
+out:
+ return ret;
+}
+
+/* LINTED */
+int i915_master_create(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_i915_master_private *master_priv;
+
+ master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
+ if (!master_priv)
+ return -ENOMEM;
+
+ master->driver_priv = master_priv;
+ return 0;
+}
+
+/* LINTED */
+void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_i915_master_private *master_priv = master->driver_priv;
+
+ if (!master_priv)
+ return;
+
+ kfree(master_priv, sizeof(struct drm_i915_master_private));
+
+ master->driver_priv = NULL;
+}
+
+
+/* OSOL_i915 Begin */
+#define pci_dev_put(d) do_pci_dev_put(&(d))
+
+void do_pci_dev_put(struct drm_i915_bridge_dev *bridge_dev)
+{
+ if (bridge_dev->bridge_dev_hdl) {
+ (void) ldi_close(bridge_dev->bridge_dev_hdl, 0, kcred);
+ bridge_dev->bridge_dev_hdl = NULL;
+ }
+
+ if (bridge_dev->ldi_id) {
+ ldi_ident_release(bridge_dev->ldi_id);
+ bridge_dev->ldi_id = NULL;
+ }
+}
+/* OSOL_i915 End */
+
+/**
+ * intel_early_sanitize_regs - clean up BIOS state
+ * @dev: DRM device
+ *
+ * This function must be called before we do any I915_READ or I915_WRITE. Its
+ * purpose is to clean up any state left by the BIOS that may affect us when
+ * reading and/or writing registers.
+ */
+static void intel_early_sanitize_regs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_device_info *info;
+ resource_size_t base, size;
+ int ret = 0, mmio_bar;
+
+ info = (struct intel_device_info *) flags;
+
+ /* Refuse to load on gen6+ without kms enabled. */
+ if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ /* i915 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ dev->dev_private = (void *)dev_priv;
+ dev_priv->dev = dev;
+ dev_priv->info = info;
+
+ /* Add register map (needed for suspend/resume) */
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ base = drm_get_resource_start(dev, mmio_bar);
+ size = drm_get_resource_len(dev, mmio_bar);
+
+ dev_priv->regs = drm_alloc(sizeof (drm_local_map_t), DRM_MEM_MAPS);
+ dev_priv->regs->offset = base;
+ dev_priv->regs->size = size;
+ dev_priv->regs->type = _DRM_REGISTERS;
+ dev_priv->regs->flags = _DRM_REMOVABLE;
+ if (drm_ioremap(dev, dev_priv->regs)) {
+ ret = -EIO;
+ goto put_bridge;
+ }
+
+ DRM_DEBUG("mmio paddr=%lx, kvaddr=%p", dev_priv->regs->offset, dev_priv->regs->handle);
+
+ intel_early_sanitize_regs(dev);
+ /* The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and hangcheck.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time: max_active = 1 and NON_REENTRANT.
+ */
+ dev_priv->wq = create_workqueue(dev->devinfo, "i915");
+ if (dev_priv->wq == NULL) {
+ DRM_ERROR("Failed to create i915 workqueue.\n");
+ ret = -ENOMEM;
+ goto out_rmmap;
+ }
+
+ /* The i915 workqueue is primarily used for page_flip and fbc */
+ dev_priv->other_wq = create_workqueue(dev->devinfo, "i915_other");
+ if (dev_priv->other_wq == NULL) {
+ DRM_ERROR("Failed to create i915_other workqueue.\n");
+ ret = -ENOMEM;
+ goto out_mtrrfree;
+ }
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
+
+ intel_irq_init(dev);
+ intel_pm_init(dev);
+ intel_gt_sanitize(dev);
+ intel_gt_init(dev);
+
+ if (intel_setup_gmbus(dev) != 0)
+ goto out_mtrrfree;
+
+ /* Make sure the bios did its job and set up vital registers */
+ intel_setup_bios(dev);
+
+ i915_gem_load(dev);
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+ * according to the published specs. It doesn't appear to function
+ * correctly in testing on 945G.
+ * This may be a side effect of MSI having been made available for PEG
+ * and the registers being closely associated.
+ *
+ * According to chipset errata, on the 965GM, MSI interrupts may
+ * be lost or delayed, but we use them anyways to avoid
+ * stuck interrupts on some machines.
+ */
+ /* Fix me: Failed to get interrupts after resume, when enable msi */
+ /*
+ if (!IS_I945G(dev) && !IS_I945GM(dev))
+ pci_enable_msi(dev->pdev);
+ */
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ spin_lock_init(&dev_priv->rps.lock);
+ spin_lock_init(&dev_priv->dpio_lock);
+
+ spin_lock_init(&dev_priv->rps.hw_lock);
+ spin_lock_init(&dev_priv->modeset_restore_lock);
+
+ dev_priv->num_plane = 1;
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->num_plane = 2;
+
+ if (INTEL_INFO(dev)->num_pipes) {
+ ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+ if (ret)
+ goto out_gem_unload;
+ }
+
+ /* Start out suspended */
+ dev_priv->mm.suspended = 1;
+
+ if (HAS_POWER_WELL(dev))
+ i915_init_power_well(dev);
+
+ if (IS_GEN7(dev))
+ i915_try_reset = true;
+
+ dev_priv->gpu_hang = 0;
+
+ init_timer(&dev_priv->gpu_top_timer);
+ setup_timer(&dev_priv->gpu_top_timer, gpu_top_handler,
+ (void *) dev);
+
+ if (MDB_TRACK_ENABLE)
+ INIT_LIST_HEAD(&dev_priv->batch_list);
+
+ return 0;
+
+out_gem_unload:
+ destroy_workqueue(dev_priv->other_wq);
+out_mtrrfree:
+ destroy_workqueue(dev_priv->wq);
+out_rmmap:
+ drm_ioremapfree(dev_priv->regs);
+put_bridge:
+free_priv:
+ kfree(dev_priv, sizeof(struct drm_i915_private));
+ return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (HAS_POWER_WELL(dev))
+ i915_remove_power_well(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ i915_gem_retire_requests(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ destroy_workqueue(dev_priv->other_wq);
+ destroy_workqueue(dev_priv->wq);
+
+ del_timer_sync(&dev_priv->gpu_top_timer);
+ destroy_timer(&dev_priv->gpu_top_timer);
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+ destroy_timer(&dev_priv->gpu_error.hangcheck_timer);
+
+/* XXXX rebracket after this is tested */
+ /*
+ * Uninitialized GTT indicates that i915 never opens.
+ * So we should not try to release the resources
+ * which are only allocated in i915_driver_firstopen.
+ */
+ if (dev_priv->gtt.total != 0) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ (void) drm_irq_uninstall(dev);
+ /* XXX FIXME vga_client_register(dev->pdev, NULL, NULL, NULL); */
+ }
+
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+ i915_free_hws(dev); /* XXX should still be here? */
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (dev_priv->fbcon_obj != NULL)
+ intel_fbdev_fini(dev);
+ intel_modeset_cleanup(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_free_all_phys_object(dev);
+ i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ i915_gem_cleanup_stolen(dev);
+ if (!I915_NEED_GFX_HWS(dev))
+ i915_free_hws(dev);
+ i915_gem_lastclose(dev);
+ if (dev_priv->gtt.scratch_page)
+ teardown_scratch_page(dev);
+ if (dev_priv->fbcon_obj != NULL) {
+ i915_gem_free_object(&dev_priv->fbcon_obj->base);
+ dev_priv->fbcon_obj = NULL;
+ }
+ }
+ }
+ drm_mm_takedown(&dev_priv->mm.gtt_space);
+ dev_priv->gtt.gtt_remove(dev);
+
+ if (dev_priv->regs != NULL)
+ (void) drm_rmmap(dev, dev_priv->regs);
+
+ mutex_destroy(&dev_priv->irq_lock);
+
+ pci_dev_put(dev_priv->bridge_dev);
+
+ if (MDB_TRACK_ENABLE) {
+ struct batch_info_list *r_list, *list_temp;
+ list_for_each_entry_safe(r_list, list_temp, struct batch_info_list, &dev_priv->batch_list, head) {
+ list_del(&r_list->head);
+ drm_free(r_list->obj_list, r_list->num * sizeof(caddr_t), DRM_MEM_MAPS);
+ drm_free(r_list, sizeof (struct batch_info_list), DRM_MEM_MAPS);
+ }
+ list_del(&dev_priv->batch_list);
+ }
+ kfree(dev->dev_private, sizeof(drm_i915_private_t));
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+int
+i915_driver_firstopen(struct drm_device *dev)
+{
+ static bool first_call = true;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ u32 aperbase;
+ int ret = 0;
+
+ if (first_call) {
+ /* OSOL_i915: moved from i915_driver_load */
+
+ if (i915_get_bridge_dev(dev)) {
+ DRM_ERROR("i915_get_bridge_dev() failed.");
+ return -EIO;
+ }
+
+ /*
+ * AGP has been removed for GEN6+,
+ * So we read the agp base and size here.
+ */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ pci_read_config_dword(pdev, GEN6_CONF_GMADR, &aperbase);
+ dev->agp_aperbase = aperbase & GEN6_GTT_BASE_MASK;
+ } else {
+ dev->agp_aperbase = dev->agp->agp_info.agpi_aperbase;
+ }
+
+ ret = i915_gem_gtt_init(dev);
+ if (ret) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ pci_dev_put(dev_priv->bridge_dev);
+ ret = -ENODEV;
+ return ret;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ pci_dev_put(dev_priv->bridge_dev);
+ return ret;
+ }
+ }
+ }
+
+ dev_priv->isX = 1;
+ first_call = false;
+ return ret;
+}
+
+/* LINTED */
+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ i915_file_priv = (struct drm_i915_file_private *)
+ kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
+
+ if (!i915_file_priv)
+ return -ENOMEM;
+
+ spin_lock_init(&i915_file_priv->mm.lock);
+ INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+
+ idr_init(&i915_file_priv->context_idr);
+
+ i915_file_priv->status = 1;
+ file_priv->driver_priv = i915_file_priv;
+
+ return 0;
+}
+
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+void i915_driver_lastclose(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* On gen6+ we refuse to init without kms enabled, but then the drm core
+ * goes right around and calls lastclose. Check for this and don't clean
+ * up anything. */
+ if (!dev_priv)
+ return;
+ dev_priv->isX = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_fb_restore_mode(dev);
+ return;
+ }
+
+ i915_gem_lastclose(dev);
+
+ (void) i915_dma_cleanup(dev);
+}
+
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+ i915_gem_context_close(dev, file_priv);
+ i915_gem_release(dev, file_priv);
+}
+
+void i915_driver_entervt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Do nothing when coming back from high-res mode (VESA) */
+ if (dev_priv->fbcon_obj)
+ return;
+
+ /* Need to do full modeset from VGA TEXT mode */
+ if (dev_priv->vt_holding > 0) {
+ (void) i915_restore_state(dev);
+ if (IS_HASWELL(dev))
+ intel_modeset_setup_hw_state(dev, false);
+ else
+ intel_modeset_setup_hw_state(dev, true);
+ }
+ dev_priv->vt_holding = 0;
+}
+
+void i915_driver_leavevt(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->fbcon_obj)
+ return;
+
+ (void) i915_save_state(dev);
+
+ if (IS_HASWELL(dev))
+ intel_modeset_disable(dev);
+
+ dev_priv->vt_holding = 1;
+}
+
+/* LINTED */
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ kfree(i915_file_priv, sizeof(*i915_file_priv));
+ file_priv->driver_priv = NULL;
+}
+
+struct drm_ioctl_desc i915_ioctls[] = {
+ I915_IOCTL_DEF(DRM_IOCTL_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_FLUSH, i915_flush_ioctl, DRM_AUTH, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_FLIP, i915_flip_bufs, DRM_AUTH, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH, copyin32_i915_batchbuffer, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH, copyin32_i915_irq_emit, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GETPARAM, i915_getparam, DRM_AUTH, copyin32_i915_getparam, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_ALLOC, drm_noop, DRM_AUTH, copyin32_i915_mem_alloc, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_FREE, drm_noop, DRM_AUTH, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH, copyin32_i915_cmdbuffer, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED, NULL, NULL),
+ I915_IOCTL_DEF(DRM_IOCTL_I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED, NULL, NULL),
+};
+
+int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ */
+/* LINTED */
+int i915_driver_device_is_agp(struct drm_device * dev)
+{
+ return 1;
+}
diff --git a/usr/src/uts/intel/io/i915/i915_drv.c b/usr/src/uts/intel/io/i915/i915_drv.c
new file mode 100644
index 0000000..d7bb151
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_drv.c
@@ -0,0 +1,1465 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * i915_drv.c -- Intel i915 driver -*- linux-c -*-
+ * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
+ */
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+/*
+ * I915 DRM Driver for Solaris
+ *
+ * This driver provides hardware 3D acceleration support for Intel
+ * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
+ * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
+ * means the kernel device driver in DRI.
+ *
+ * The i915 driver is device-dependent code only; it depends on a misc module
+ * named drm for generic DRM operations.
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
+
+static int i915_modeset = -1;
+unsigned int i915_fbpercrtc = 0;
+int i915_panel_ignore_lid = 1;
+unsigned int i915_powersave = 1;
+
+int i915_semaphores = -1;
+
+int i915_enable_rc6 = 0;
+int i915_enable_fbc = -1;
+
+unsigned int i915_lvds_downclock = 0;
+int i915_lvds_channel_mode;
+
+int i915_panel_use_ssc = -1;
+int i915_vbt_sdvo_panel_type = -1;
+
+bool i915_try_reset = false;
+bool i915_enable_hangcheck = true;
+int i915_enable_ppgtt = -1;
+
+int i915_disable_power_well = 1;
+int i915_enable_ips = 1;
+
+static void *i915_statep;
+
+static int i915_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
+static int i915_attach(dev_info_t *, ddi_attach_cmd_t);
+static int i915_detach(dev_info_t *, ddi_detach_cmd_t);
+static int i915_quiesce(dev_info_t *);
+
+extern struct cb_ops drm_cb_ops;
+extern int intel_agp_enabled;
+
+static struct dev_ops i915_dev_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ i915_info, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ i915_attach, /* devo_attach */
+ i915_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ &drm_cb_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ NULL, /* power */
+ i915_quiesce, /* devo_quiesce */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* drv_modops */
+ "I915 DRM driver", /* drv_linkinfo */
+ &i915_dev_ops, /* drv_dev_ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *) &modldrv, NULL
+};
+
+#define INTEL_VGA_DEVICE(id, info) { \
+ .vendor = 0x8086, \
+ .device = id, \
+ .driver_data = (unsigned long) info }
+
+static const struct intel_device_info intel_i830_info = {
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_845g_info = {
+ .gen = 2, .num_pipes = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+ .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+ .gen = 2, .num_pipes = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i915gm_info = {
+ .gen = 3, .is_mobile = 1, .num_pipes = 2,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+};
+static const struct intel_device_info intel_i945g_info = {
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i945gm_info = {
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+ .gen = 4, .is_broadwater = 1, .num_pipes = 2,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+ .gen = 4, .is_crestline = 1, .num_pipes = 2,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
+};
+
+static const struct intel_device_info intel_g33_info = {
+ .gen = 3, .is_g33 = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_g45_info = {
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+ .gen = 4, .is_g4x = 1, .num_pipes = 2,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+ .gen = 5, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+ .gen = 5, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+ .gen = 6, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+ .gen = 6, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_force_wake = 1,
+};
+
+#define GEN7_FEATURES \
+ .gen = 7, .num_pipes = 3, \
+ .need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_bsd_ring = 1, \
+ .has_blt_ring = 1, \
+ .has_llc = 1, \
+ .has_force_wake = 1
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .is_mobile = 1,
+ .has_fbc = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .num_pipes = 0, /* legal, last one wins */
+};
+
+static const struct intel_device_info intel_valleyview_m_info = {
+ GEN7_FEATURES,
+ .is_mobile = 1,
+ .num_pipes = 2,
+ .is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_llc = 0, /* legal, last one wins */
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+ GEN7_FEATURES,
+ .num_pipes = 2,
+ .is_valleyview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ .has_llc = 0, /* legal, last one wins */
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+ GEN7_FEATURES,
+ .is_haswell = 1,
+ .has_ddi = 1,
+ .has_fpga_dbg = 1,
+ .has_vebox_ring = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ GEN7_FEATURES,
+ .is_haswell = 1,
+ .is_mobile = 1,
+ .has_ddi = 1,
+ .has_fpga_dbg = 1,
+ .has_fbc = 1,
+ .has_vebox_ring = 1,
+};
+static struct drm_pci_id_list pciidlist[] = {
+ INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+ INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
+ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+ INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+ INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
+ INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
+ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
+ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
+ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT3 mobile */
+ INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
+ INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
+ INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
+ INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
+ INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
+ INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
+ INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
+ INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
+ INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
+ INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
+ INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
+ INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
+ INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
+ INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
+ INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
+ INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
+ INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
+ INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
+ INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
+ INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
+ INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
+ INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
+ INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
+ INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
+ INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
+ INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
+ INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
+ INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
+ INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
+ INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
+ INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
+ INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
+ INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
+ INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
+ INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
+ INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
+ INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
+ INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
+ INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
+ INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
+ INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
+ INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
+ INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
+ INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
+ INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
+ INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
+ INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
+ INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
+ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
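+ /* all-zero sentinel: lets the probe loop walk the table without a count */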
+ {0, 0, 0}
+};
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+
+void intel_detect_pch(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_info_t *isa_dip;
+ int vendor_id, device_id;
+ /* LINTED */
+ int error;
+ /* In all current cases, num_pipes == 0 is equivalent to the PCH_NOP
+ * setting (which really amounts to a PCH, but with no south display).
+ */
+ if (INTEL_INFO(dev)->num_pipes == 0) {
+ dev_priv->pch_type = PCH_NOP;
+ return;
+ }
+
+ /*
+ * We probe the ISA bridge instead of Dev31:Fun0 so that graphics
+ * device passthrough works easily for VMMs, which then only need
+ * to expose the ISA bridge for the driver to identify the real
+ * hardware underneath. This is a requirement from the
+ * virtualization team.
+ */
+ isa_dip = ddi_find_devinfo("isa", -1, 0);
+
+ if (isa_dip) {
+ vendor_id = ddi_prop_get_int(DDI_DEV_T_ANY, isa_dip, DDI_PROP_DONTPASS,
+ "vendor-id", -1);
+ DRM_DEBUG("vendor_id 0x%x", vendor_id);
+
+ if (vendor_id == PCI_VENDOR_ID_INTEL) {
+ device_id = ddi_prop_get_int(DDI_DEV_T_ANY, isa_dip, DDI_PROP_DONTPASS,
+ "device-id", -1);
+ DRM_DEBUG("device_id 0x%x", device_id);
+ device_id &= INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = (unsigned short) device_id;
+ if (device_id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_IBX;
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev));
+ } else if (device_id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ } else if (device_id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+ /* PantherPoint is CPT compatible */
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+ } else if (device_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ WARN_ON(IS_ULT(dev));
+ } else if (device_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ WARN_ON(!IS_ULT(dev));
+ }
+ }
+ }
+}
+
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return false;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
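+/*
+ * i915_semaphores is the module tunable consumed above: a negative value
+ * means "let the driver decide per-chip", while 0 or 1 force semaphores
+ * off or on respectively.
+ */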
+
+static int i915_drm_freeze(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ /* ignore lid events during suspend */
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_SUSPENDED;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
+
+ intel_set_power_well(dev, true);
+
+ drm_kms_helper_poll_disable(dev);
+
+ /* XXX FIXME: pci_save_state(dev->pdev); */
+
+ /* If KMS is active, we do the leavevt stuff here */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) && dev_priv->gtt.total != 0) {
+ if (i915_gem_idle(dev, 0))
+ DRM_ERROR("GEM idle failed, resume may fail\n");
+
+ del_timer_sync(&dev_priv->rps.delayed_resume_timer);
+
+ (void) drm_irq_uninstall(dev);
+ dev_priv->enable_hotplug_processing = false;
+ /*
+ * Disable CRTCs directly since we want to preserve sw state
+ * for _thaw.
+ */
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ dev_priv->display.crtc_disable(crtc);
+
+ intel_modeset_suspend_hw(dev);
+ }
+
+ if (dev_priv->gtt.total != 0)
+ (void) i915_save_state(dev);
+
+ return 0;
+}
+
+int
+i915_suspend(struct drm_device *dev)
+{
+ int error;
+
+ /* Check before dereferencing: dev may not be fully initialized. */
+ if (!dev || !dev->dev_private) {
+ DRM_ERROR("DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ /*
+ * First, try to restore the "console".
+ */
+ (void) drm_fb_helper_force_kernel_mode();
+
+ error = i915_drm_freeze(dev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int
+__i915_drm_thaw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int error = 0;
+
+ if (dev_priv->gtt.total != 0)
+ (void) i915_restore_state(dev);
+
+ /* KMS EnterVT equivalent */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) && dev_priv->gtt.total != 0) {
+ intel_init_pch_refclk(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->mm.suspended = 0;
+
+ error = i915_gem_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* We need working interrupts for modeset enabling ... */
+ (void) drm_irq_install(dev);
+
+ intel_modeset_init_hw(dev);
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
+
+ /*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+ * bother with the tiny race here where we might lose hotplug
+ * notifications.
+ */
+ intel_hpd_init(dev);
+ dev_priv->enable_hotplug_processing = true;
+ }
+
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ dev_priv->modeset_restore = MODESET_DONE;
+ mutex_unlock(&dev_priv->modeset_restore_lock);
+
+ return error;
+}
+
+static int
+i915_drm_thaw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int error = 0;
+
+ if (dev_priv->gtt.total != 0)
+ intel_gt_sanitize(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ dev_priv->gtt.total != 0) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+ error = __i915_drm_thaw(dev);
+
+ return error;
+}
+
+int
+i915_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->gtt.total != 0)
+ intel_gt_sanitize(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ dev_priv->gtt.total != 0) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ ret = __i915_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
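+/*
+ * The freeze/thaw pairs above back the DDI entry points below:
+ * i915_detach(DDI_SUSPEND) -> i915_suspend -> i915_drm_freeze, and
+ * i915_attach(DDI_RESUME) -> i915_resume -> __i915_drm_thaw.
+ */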
+
+static int i8xx_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ msleep(1);
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ msleep(1);
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ (void) pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int i965_do_reset(struct drm_device *dev)
+{
+ int ret;
+ u8 gdrst;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_RENDER |
+ GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_MEDIA |
+ GRDOM_RESET_ENABLE);
+
+ return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gdrst;
+ int ret;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ unsigned long irqflags;
+
+ /* Hold gt_lock across reset to prevent any register access
+ * with forcewake not set correctly
+ */
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+
+ /* Reset the chip */
+
+ /* GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+
+ /* Spin waiting for the device to ack the reset request */
+ ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+ /* If a user still held forcewake across the reset, restore it; otherwise turn it off */
+ if (dev_priv->forcewake_count)
+ dev_priv->gt.force_wake_get(dev_priv);
+ else
+ dev_priv->gt.force_wake_put(dev_priv);
+
+ /* Restore fifo count */
+ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ return ret;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: return gen6_do_reset(dev);
+ case 5: return ironlake_do_reset(dev);
+ case 4: return i965_do_reset(dev);
+ case 2: return i8xx_do_reset(dev);
+ default: return -ENODEV;
+ }
+}
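+/*
+ * Note that gen3 has no case above and so reports -ENODEV; only gen2
+ * (i8xx), gen4 (i965), gen5 (ironlake) and gen6/7 have reset hooks here.
+ */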
+
+/**
+ * i915_reset - reset chip after a hang
+ * @dev: drm device to reset
+ *
+ * Reset the chip. Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ * - reset the chip using the reset reg
+ * - re-init context state
+ * - re-init hardware status page
+ * - re-init ring buffer
+ * - re-init interrupt state
+ * - re-init display
+ */
+int i915_reset(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct timeval cur_time;
+ bool simulated;
+ int ret;
+
+ if (!i915_try_reset)
+ return 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_gem_reset(dev);
+
+ simulated = dev_priv->gpu_error.stop_rings != 0;
+
+ do_gettimeofday(&cur_time);
+ if (!simulated && cur_time.tv_sec - dev_priv->gpu_error.last_reset < 5) {
+ ret = -ENODEV;
+ DRM_ERROR("GPU hanging too fast, wait 5 secons for another reset");
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ } else {
+ ret = intel_gpu_reset(dev);
+
+ /* Also reset the gpu hangman. */
+ if (simulated) {
+ DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->gpu_error.stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_ERROR("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
+ }
+ } else {
+ do_gettimeofday(&cur_time);
+ dev_priv->gpu_error.last_reset = cur_time.tv_sec;
+ }
+ }
+ if (ret) {
+ DRM_ERROR("Failed to reset chip.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /* Ok, now get things going again... */
+
+ /*
+ * Everything depends on having the GTT running, so we need to start
+ * there. Fortunately we don't need to do this unless we reset the
+ * chip at a PCI level.
+ *
+ * Next we need to restore the context, but we don't use those
+ * yet either...
+ *
+ * Ring buffer needs to be re-initialized in the KMS case, or if X
+ * was running at the time of the reset (i.e. we weren't VT
+ * switched away).
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+ !dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring;
+ int i;
+
+ dev_priv->mm.suspended = 0;
+
+ i915_gem_init_swizzling(dev);
+
+ for_each_ring(ring, dev_priv, i)
+ ring->init(ring);
+
+ i915_gem_context_init(dev);
+ if (dev_priv->mm.aliasing_ppgtt) {
+ ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
+ if (ret)
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ }
+
+ /*
+ * It would make sense to re-init all the other hw state, at
+ * least the rps/rc6/emon init done within modeset_init_hw. For
+ * some unknown reason, this blows up my ilk, so don't.
+ */
+
+ mutex_unlock(&dev->struct_mutex);
+
+ (void) drm_irq_uninstall(dev);
+ if (drm_irq_install(dev)) {
+ DRM_ERROR("Could not install irq for driver.\n");
+ return -EIO;
+ }
+ intel_hpd_init(dev);
+ } else {
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ return 0;
+}
+
+static struct drm_driver driver = {
+ /* don't use MTRRs here; the Xserver or a user space app should
+ * deal with them for Intel hardware.
+ */
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ .load = i915_driver_load,
+ .unload = i915_driver_unload,
+ .firstopen = i915_driver_firstopen,
+ .open = i915_driver_open,
+ .lastclose = i915_driver_lastclose,
+ .preclose = i915_driver_preclose,
+ .postclose = i915_driver_postclose,
+
+ /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
+ .device_is_agp = i915_driver_device_is_agp,
+ .master_create = i915_master_create,
+ .master_destroy = i915_master_destroy,
+ /* OSOL begin */
+ .entervt = i915_driver_entervt,
+ .leavevt = i915_driver_leavevt,
+ .agp_support_detect = i915_driver_agp_support_detect,
+ /* OSOL end */
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = i915_debugfs_init,
+ .debugfs_cleanup = i915_debugfs_cleanup,
+#endif
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
+ /*.gem_vm_ops = &i915_gem_vm_ops,*/
+ .gem_fault = i915_gem_fault,
+ .ioctls = i915_ioctls,
+
+ .id_table = pciidlist,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int
+i915_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ struct drm_device *dev;
+ int ret, item;
+
+ item = ddi_get_instance(dip);
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ if (ddi_soft_state_zalloc(i915_statep, item) != DDI_SUCCESS) {
+ DRM_ERROR("failed to alloc softstate, item = %d", item);
+ return (DDI_FAILURE);
+ }
+
+ dev = ddi_get_soft_state(i915_statep, item);
+ if (!dev) {
+ DRM_ERROR("cannot get soft state");
+ return (DDI_FAILURE);
+ }
+
+ dev->devinfo = dip;
+
+ if (!(driver.driver_features & DRIVER_MODESET))
+ driver.get_vblank_timestamp = NULL;
+
+ ret = drm_init(dev, &driver);
+ if (ret != DDI_SUCCESS)
+ (void) ddi_soft_state_free(i915_statep, item);
+
+ return (ret);
+
+ case DDI_RESUME:
+ dev = ddi_get_soft_state(i915_statep, item);
+ if (!dev) {
+ DRM_ERROR("cannot get soft state");
+ return (DDI_FAILURE);
+ }
+
+ return (i915_resume(dev));
+ }
+
+ DRM_ERROR("only supports attach or resume");
+ return (DDI_FAILURE);
+}
+
+static int
+i915_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ struct drm_device *dev;
+ int item;
+
+ item = ddi_get_instance(dip);
+ dev = ddi_get_soft_state(i915_statep, item);
+ if (!dev) {
+ DRM_ERROR("cannot get soft state");
+ return (DDI_FAILURE);
+ }
+
+ switch (cmd) {
+ case DDI_DETACH:
+ drm_exit(dev);
+ (void) ddi_soft_state_free(i915_statep, item);
+ return (DDI_SUCCESS);
+
+ case DDI_SUSPEND:
+ return (i915_suspend(dev));
+ }
+
+ DRM_ERROR("only supports detach or suspend");
+ return (DDI_FAILURE);
+}
+
+static int
+/* LINTED */
+i915_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+ struct drm_minor *minor;
+
+ minor = idr_find(&drm_minors_idr, DRM_DEV2MINOR((dev_t)arg));
+ if (!minor)
+ return (DDI_FAILURE);
+ if (!minor->dev || !minor->dev->devinfo)
+ return (DDI_FAILURE);
+
+ switch (infocmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ *result = (void *)minor->dev->devinfo;
+ return (DDI_SUCCESS);
+
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)(uintptr_t)ddi_get_instance(minor->dev->devinfo);
+ return (DDI_SUCCESS);
+ }
+
+ return (DDI_FAILURE);
+}
+
+static int
+i915_quiesce(dev_info_t *dip)
+{
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ dev = ddi_get_soft_state(i915_statep, ddi_get_instance(dip));
+
+ if (!dev)
+ return (DDI_FAILURE);
+
+ dev_priv = dev->dev_private;
+
+ if (dev_priv && dev_priv->gtt.total != 0) {
+
+ (void) drm_fb_helper_force_kernel_mode();
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+ i915_gem_retire_requests(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (dev_priv->fbcon_obj != NULL)
+ intel_fbdev_fini(dev);
+
+ drm_kms_helper_poll_fini(dev);
+ mutex_lock(&dev->struct_mutex);
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ /* Skip inactive CRTCs */
+ if (!crtc->fb)
+ continue;
+
+ intel_increase_pllclock(crtc);
+ }
+
+ intel_disable_fbc(dev);
+
+ intel_disable_gt_powersave(dev);
+
+ ironlake_teardown_rc6(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+ drm_mode_config_cleanup(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_free_all_phys_object(dev);
+ i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ i915_gem_cleanup_stolen(dev);
+ drm_mm_takedown(&dev_priv->mm.stolen);
+ intel_cleanup_overlay(dev);
+
+ i915_gem_lastclose(dev);
+
+ if (dev_priv->gtt.scratch_page)
+ teardown_scratch_page(dev);
+
+ if (MDB_TRACK_ENABLE) {
+ struct batch_info_list *r_list, *list_temp;
+ list_for_each_entry_safe(r_list, list_temp, struct batch_info_list, &dev_priv->batch_list, head) {
+ list_del(&r_list->head);
+ drm_free(r_list->obj_list, r_list->num * sizeof(caddr_t), DRM_MEM_MAPS);
+ drm_free(r_list, sizeof (struct batch_info_list), DRM_MEM_MAPS);
+ }
+ list_del(&dev_priv->batch_list);
+ }
+
+ if (dev->old_gtt) {
+ intel_rw_gtt(dev, dev->old_gtt_size,
+ 0, (void *) dev->old_gtt, 1);
+ kmem_free(dev->old_gtt, dev->old_gtt_size);
+ }
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static int __init i915_init(void)
+{
+ driver.num_ioctls = i915_max_ioctl;
+
+ driver.driver_features |= DRIVER_MODESET;
+
+ return 0;
+}
+
+static void __exit i915_exit(void)
+{
+
+}
+
+int
+_init(void)
+{
+ int ret;
+
+ ret = ddi_soft_state_init(&i915_statep,
+ sizeof (struct drm_device), DRM_MAX_INSTANCES);
+ if (ret)
+ return (ret);
+
+ ret = i915_init();
+ if (ret) {
+ ddi_soft_state_fini(&i915_statep);
+ return (ret);
+ }
+
+ ret = mod_install(&modlinkage);
+ if (ret) {
+ i915_exit();
+ ddi_soft_state_fini(&i915_statep);
+ return (ret);
+ }
+
+ return (ret);
+}
+
+int
+_fini(void)
+{
+ int ret;
+
+ ret = mod_remove(&modlinkage);
+ if (ret)
+ return (ret);
+
+ i915_exit();
+
+ ddi_soft_state_fini(&i915_statep);
+
+ return (ret);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
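+/*
+ * Registers below 0x40000 live in the GT power well and may need a
+ * forcewake reference before access on HAS_FORCE_WAKE parts; FORCEWAKE
+ * itself is excluded so that taking the reference cannot recurse.
+ */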
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+ /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+ * chip from rc6 before touching it for real. MI_MODE is masked, hence
+ * harmless to write 0 into. */
+ I915_WRITE_NOTRACE(MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+ (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_INFO("Unknown unclaimed register before writing to %x\n",
+ reg);
+ I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+ (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_INFO("Unclaimed write to %x\n", reg);
+ I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+u8 i915_read8(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u8 val;
+
+ if (IS_GEN5(dev_priv->dev))
+ ilk_dummy_write(dev_priv);
+
+ if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
+ unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ val = DRM_READ8(dev_priv->regs, reg);
+ if (dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ } else
+ val = DRM_READ8(dev_priv->regs, (reg));
+ return val;
+}
+
+u16 i915_read16(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u16 val;
+
+ if (IS_GEN5(dev_priv->dev))
+ ilk_dummy_write(dev_priv);
+
+ if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
+ unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ val = DRM_READ16(dev_priv->regs, reg);
+ if (dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ } else
+ val = DRM_READ16(dev_priv->regs, (reg));
+ return val;
+}
+
+u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u32 val;
+
+ if (IS_GEN5(dev_priv->dev))
+ ilk_dummy_write(dev_priv);
+
+ if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
+ unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ val = DRM_READ32(dev_priv->regs, reg);
+ if (dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ } else
+ val = DRM_READ32(dev_priv->regs, (reg));
+ return val;
+}
+
+u64 i915_read64(struct drm_i915_private *dev_priv, u32 reg)
+{
+ u64 val;
+
+ if (IS_GEN5(dev_priv->dev))
+ ilk_dummy_write(dev_priv);
+
+ if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
+ unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ val = DRM_READ64(dev_priv->regs, reg);
+ if (dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ } else
+ val = DRM_READ64(dev_priv->regs, (reg));
+ return val;
+}
+
+void i915_write8(struct drm_i915_private *dev_priv, u32 reg,
+ u8 val)
+{
+ unsigned long irqflags;
+ u32 __fifo_ret = 0;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (NEEDS_FORCE_WAKE(dev_priv, reg))
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
+
+ if (IS_GEN5(dev_priv->dev))
+ ilk_dummy_write(dev_priv);
+
+ hsw_unclaimed_reg_clear(dev_priv, reg);
+
+ DRM_WRITE8(dev_priv->regs, (reg), (val));
+ if (__fifo_ret)
+ gen6_gt_check_fifodbg(dev_priv);
+ hsw_unclaimed_reg_check(dev_priv, reg);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+void i915_write16(struct drm_i915_private *dev_priv, u32 reg,
+ u16 val)
+{
+ unsigned long irqflags;
+ u32 __fifo_ret = 0;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (NEEDS_FORCE_WAKE(dev_priv, reg))
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
+
+ if (IS_GEN5(dev_priv->dev))
+ ilk_dummy_write(dev_priv);
+
+ hsw_unclaimed_reg_clear(dev_priv, reg);
+
+ DRM_WRITE16(dev_priv->regs, (reg), (val));
+ if (__fifo_ret)
+ gen6_gt_check_fifodbg(dev_priv);
+ hsw_unclaimed_reg_check(dev_priv, reg);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+void i915_write32(struct drm_i915_private *dev_priv, u32 reg,
+ u32 val)
+{
+ unsigned long irqflags;
+ u32 __fifo_ret = 0;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (NEEDS_FORCE_WAKE(dev_priv, reg))
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
+
+ if (IS_GEN5(dev_priv->dev))
+ ilk_dummy_write(dev_priv);
+
+ hsw_unclaimed_reg_clear(dev_priv, reg);
+
+ DRM_WRITE32(dev_priv->regs, (reg), (val));
+ if (__fifo_ret)
+ gen6_gt_check_fifodbg(dev_priv);
+ hsw_unclaimed_reg_check(dev_priv, reg);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+void i915_write64(struct drm_i915_private *dev_priv, u32 reg,
+ u64 val)
+{
+ unsigned long irqflags;
+ u32 __fifo_ret = 0;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (NEEDS_FORCE_WAKE(dev_priv, reg))
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
+
+ if (IS_GEN5(dev_priv->dev))
+ ilk_dummy_write(dev_priv);
+
+ hsw_unclaimed_reg_clear(dev_priv, reg);
+
+ DRM_WRITE64(dev_priv->regs, (reg), (val));
+ if (__fifo_ret)
+ gen6_gt_check_fifodbg(dev_priv);
+ hsw_unclaimed_reg_check(dev_priv, reg);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+#define __i915_read(x) \
+u ## x i915_read ## x(struct drm_i915_private *dev_priv, u32 reg);
+
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
+#undef __i915_read
+
+#define __i915_write(x) \
+void i915_write ## x(struct drm_i915_private *dev_priv, u32 reg, \
+ u ## x val);
+
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
+#undef __i915_write
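+/*
+ * The I915_READ/I915_WRITE macros used throughout this file are thin
+ * wrappers over the accessors above, defined in i915_drv.h roughly as
+ * (sketch only; see the header for the exact definitions):
+ *
+ *	#define I915_READ(reg)		i915_read32(dev_priv, (reg))
+ *	#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
+ */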
+
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+ uint32_t gen_bitmask; /* supported gens: bit n = gen n, e.g. 0x10 for gen4, 0x30 for gens 4 and 5 */
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
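+/*
+ * gen_bitmask is decoded as "bit n set => gen n supported", matching the
+ * (1 << gen) test in i915_reg_read_ioctl() below; 0xF0 above covers
+ * gens 4 through 7.
+ */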
+
+int i915_reg_read_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ ((1 << INTEL_INFO(dev)->gen) & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
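+/*
+ * Userspace usage sketch (hypothetical, for illustration): reading the
+ * render ring timestamp through the whitelist above:
+ *
+ *	struct drm_i915_reg_read rr = { .offset = RING_TIMESTAMP(RENDER_RING_BASE) };
+ *	ioctl(fd, DRM_IOCTL_I915_REG_READ, &rr);
+ *
+ * after which rr.val holds the 64-bit timestamp.
+ */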
+
+void i915_gem_chipset_flush(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ drm_agp_chipset_flush(dev);
+}
+
+void i915_driver_agp_support_detect(struct drm_device *dev, unsigned long flags)
+{
+ struct intel_device_info *info;
+ info = (struct intel_device_info *) flags;
+
+ /* Remove AGP support for GEN6+ platform */
+ if (info->gen >= 6)
+ driver.driver_features &= ~DRIVER_USE_AGP;
+}
diff --git a/usr/src/uts/intel/io/i915/i915_drv.h b/usr/src/uts/intel/io/i915/i915_drv.h
new file mode 100644
index 0000000..a5c6e75
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_drv.h
@@ -0,0 +1,2046 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRV_H_
+#define _I915_DRV_H_
+
+#include "i915_drm.h"
+#include "i915_reg.h"
+#include "intel_bios.h"
+#include "intel_ringbuffer.h"
+
+#include <sys/systm.h> /* for __lintzero */
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
+
+#define DRIVER_NAME "i915"
+#define DRIVER_DESC "Intel Graphics for Solaris"
+#define DRIVER_DATE "2013/06/19"
+
+enum pipe {
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ I915_MAX_PIPES
+};
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
+enum plane {
+ PLANE_A = 0,
+ PLANE_B,
+ PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
+
+enum port {
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
+
+enum hpd_pin {
+ HPD_NONE = 0,
+ HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
+ HPD_TV = HPD_NONE, /* TV is known to be unreliable */
+ HPD_CRT,
+ HPD_SDVO_B,
+ HPD_SDVO_C,
+ HPD_PORT_B,
+ HPD_PORT_C,
+ HPD_PORT_D,
+ HPD_NUM_PINS
+};
+
+#define I915_GEM_GPU_DOMAINS \
+ (I915_GEM_DOMAIN_RENDER | \
+ I915_GEM_DOMAIN_SAMPLER | \
+ I915_GEM_DOMAIN_COMMAND | \
+ I915_GEM_DOMAIN_INSTRUCTION | \
+ I915_GEM_DOMAIN_VERTEX)
+
+#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+
+#define for_each_encoder_on_crtc(dev, __crtc, _intel_encoder) \
+ list_for_each_entry((_intel_encoder), struct intel_encoder, &(dev)->mode_config.encoder_list, base.head) \
+ if ((_intel_encoder)->base.crtc == (__crtc))
+
+struct drm_i915_private;
+
+enum intel_dpll_id {
+ DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
+ /* real shared dpll ids must be >= 0 */
+ DPLL_ID_PCH_PLL_A,
+ DPLL_ID_PCH_PLL_B,
+};
+#define I915_NUM_PLLS 2
+
+struct intel_dpll_hw_state {
+ uint32_t dpll;
+ uint32_t fp0;
+ uint32_t fp1;
+};
+
+struct intel_shared_dpll {
+ int refcount; /* count of number of CRTCs sharing this PLL */
+ int active; /* count of number of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ const char *name;
+ /* should match the index in the dev_priv->shared_dplls array */
+ enum intel_dpll_id id;
+ struct intel_dpll_hw_state hw_state;
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state);
+};
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n);
+
+extern int gpu_dump;
+
+struct intel_ddi_plls {
+ int spll_refcount;
+ int wrpll1_refcount;
+ int wrpll2_refcount;
+};
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ * - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 6
+#define DRIVER_PATCHLEVEL 20130900
+
+#define WATCH_COHERENCY 0
+#define WATCH_LISTS 0
+#define WATCH_GTT 0
+
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+ int id;
+ struct page **page_list;
+ drm_dma_handle_t *handle;
+ struct drm_i915_gem_object *cur_obj;
+};
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ int start;
+ int size;
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ struct opregion_asle *asle;
+ void *vbt;
+ u32 *lid_state;
+};
+#define OPREGION_SIZE (8*1024)
+
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+struct drm_i915_master_private {
+ drm_local_map_t *sarea;
+ struct _drm_i915_sarea *sarea_priv;
+};
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
+struct drm_i915_fence_reg {
+ struct list_head lru_list;
+ struct drm_i915_gem_object *obj;
+ int pin_count;
+};
+
+#define I2C_NAME_SIZE 20
+
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+};
+
+struct intel_display_error_state;
+struct drm_i915_error_state {
+ struct kref ref;
+ u32 eir;
+ u32 pgtbl_er;
+ u32 ier;
+ u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
+ bool waiting[I915_NUM_RINGS];
+ u32 pipestat[I915_MAX_PIPES];
+ u32 tail[I915_NUM_RINGS];
+ u32 head[I915_NUM_RINGS];
+ u32 ctl[I915_NUM_RINGS];
+ u32 ipeir[I915_NUM_RINGS];
+ u32 ipehr[I915_NUM_RINGS];
+ u32 instdone[I915_NUM_RINGS];
+ u32 acthd[I915_NUM_RINGS];
+ u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head[I915_NUM_RINGS];
+ u32 cpu_ring_tail[I915_NUM_RINGS];
+ u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
+ u32 instpm[I915_NUM_RINGS];
+ u32 instps[I915_NUM_RINGS];
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
+ u32 seqno[I915_NUM_RINGS];
+ u64 bbaddr;
+ u32 fault_reg[I915_NUM_RINGS];
+ u32 done_reg;
+ u32 faddr[I915_NUM_RINGS];
+ u64 fence[I915_MAX_NUM_FENCES];
+ struct timeval time;
+ struct drm_i915_error_ring {
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 **pages;
+ } *ringbuffer, *batchbuffer;
+ struct drm_i915_error_request {
+ long err_jiffies;
+ u32 seqno;
+ u32 tail;
+ } *requests;
+ int num_requests;
+ } ring[I915_NUM_RINGS];
+ struct drm_i915_error_buffer {
+ size_t size;
+ u32 name;
+ u32 rseqno, wseqno;
+ u32 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+ s32 pinned:2;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ u32 ring:4;
+ u32 cache_level:2;
+ } *active_bo, *pinned_bo;
+ u32 active_bo_count, pinned_bo_count;
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+};
+
+struct intel_crtc_config;
+struct intel_crtc;
+struct intel_limit;
+struct dpll;
+
+struct drm_i915_display_funcs {
+ bool (*fbc_enabled)(struct drm_device *dev);
+ void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*disable_fbc)(struct drm_device *dev);
+ int (*get_display_clock_speed)(struct drm_device *dev);
+ int (*get_fifo_size)(struct drm_device *dev, int plane);
+ /**
+ * find_dpll() - Find the best values for the PLL
+ * @limit: limits for the PLL
+ * @crtc: current CRTC
+ * @target: target frequency in kHz
+ * @refclk: reference clock frequency in kHz
+ * @match_clock: if provided, @best_clock P divider must
+ * match the P divider from @match_clock
+ * used for LVDS downclocking
+ * @best_clock: best PLL values found
+ *
+ * Returns true on success, false on failure.
+ */
+ bool (*find_dpll)(const struct intel_limit *limit,
+ struct drm_crtc *crtc,
+ int target, int refclk,
+ struct dpll *match_clock,
+ struct dpll *best_clock);
+ void (*update_wm)(struct drm_device *dev);
+ void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size,
+ bool enable);
+ void (*modeset_global_resources)(struct drm_device *dev);
+ /* Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state. */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_config *);
+ int (*crtc_mode_set)(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
+ void (*off)(struct drm_crtc *crtc);
+ void (*write_eld)(struct drm_connector *connector,
+ struct drm_crtc *crtc);
+ void (*fdi_link_train)(struct drm_crtc *crtc);
+ void (*init_clock_gating)(struct drm_device *dev);
+ int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj);
+ int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y);
+ void (*hpd_irq_setup)(struct drm_device *dev);
+ /* clock updates for mode set */
+ /* cursor updates */
+ /* render clock increase/decrease */
+ /* display clock increase/decrease */
+ /* pll clock increase/decrease */
+};
+
+struct drm_i915_gt_funcs {
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
+};
+
+#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
+ func(is_mobile) sep \
+ func(is_i85x) sep \
+ func(is_i915g) sep \
+ func(is_i945gm) sep \
+ func(is_g33) sep \
+ func(need_gfx_hws) sep \
+ func(is_g4x) sep \
+ func(is_pineview) sep \
+ func(is_broadwater) sep \
+ func(is_crestline) sep \
+ func(is_ivybridge) sep \
+ func(is_valleyview) sep \
+ func(is_haswell) sep \
+ func(has_force_wake) sep \
+ func(has_fbc) sep \
+ func(has_pipe_cxsr) sep \
+ func(has_hotplug) sep \
+ func(cursor_needs_physical) sep \
+ func(has_overlay) sep \
+ func(overlay_needs_physical) sep \
+ func(supports_tv) sep \
+ func(has_bsd_ring) sep \
+ func(has_blt_ring) sep \
+ func(has_vebox_ring) sep \
+ func(has_llc) sep \
+ func(has_ddi) sep \
+ func(has_fpga_dbg)
+
+#define DEFINE_FLAG(name) u8 name:1
+#define SEP_SEMICOLON ;
+
+struct intel_device_info {
+ u32 display_mmio_offset;
+ u8 num_pipes:3;
+ u8 gen;
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+};
+
+#undef DEFINE_FLAG
+#undef SEP_SEMICOLON
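+/*
+ * DEV_INFO_FOR_EACH_FLAG is an X-macro: the same flag list can be
+ * re-expanded with another (func, sep) pair. A hypothetical sketch,
+ * dumping the flags of a given "const struct intel_device_info *info":
+ *
+ *	#define PRINT_FLAG(name) DRM_DEBUG(#name ": %d\n", (int)info->name)
+ *	#define SEP_SEMI ;
+ *	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMI);
+ */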
+
+enum i915_cache_level {
+ I915_CACHE_NONE = 0,
+ I915_CACHE_LLC,
+ I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+typedef uint32_t gen6_gtt_pte_t;
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+ unsigned long start; /* Start offset of used GTT */
+ size_t total; /* Total size GTT can map */
+ size_t stolen_size; /* Total size of stolen memory */
+
+ unsigned long mappable_end; /* End offset that we can CPU map */
+ drm_local_map_t gtt_mapping;
+ unsigned long mappable_base; /* PA of our GMADR */
+
+
+ struct drm_gem_object *scratch_page;
+
+ caddr_t virtual_gtt;
+
+ /* global gtt ops */
+ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+ size_t *stolen);
+ void (*gtt_remove)(struct drm_device *dev);
+ void (*gtt_clear_range)(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ uint32_t type);
+ void (*gtt_insert_entries)(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+ gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
+ uint64_t addr,
+ enum i915_cache_level level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
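+/* e.g. with 4 KiB pages (PAGE_SHIFT == 12), a 2 GiB GTT has 524288 entries */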
+
+#define I915_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES 1024
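+/* 512 PDEs x 1024 PTEs x 4 KiB pages covers a 2 GiB per-process space */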
+struct i915_hw_ppgtt {
+ struct drm_device *dev;
+ unsigned num_pd_entries;
+ pfn_t *pt_pages;
+ uint32_t pd_offset;
+ ddi_dma_handle_t dma_hdl;
+ ddi_acc_handle_t acc_hdl;
+ caddr_t kaddr;
+ size_t real_size; /* real size of memory */
+ uint64_t scratch_page_paddr;
+ /* pte functions, mirroring the interface of the global gtt. */
+ void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+ unsigned first_entry, unsigned num_entries,
+ pfn_t *pages, enum i915_cache_level cache_level);
+ gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
+ uint64_t addr,
+ enum i915_cache_level level);
+ int (*enable)(struct drm_device *dev);
+ void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
+};
+
+struct i915_ctx_hang_stats {
+ /* This context had batch pending when hang was declared */
+ unsigned batch_pending;
+
+ /* This context had batch active when hang was declared */
+ unsigned batch_active;
+};
+
+/* This must match up with the value previously used for execbuf2.rsvd1. */
+#define DEFAULT_CONTEXT_ID 0
+struct i915_hw_context {
+ struct kref ref;
+ int id;
+ bool is_initialized;
+ struct drm_i915_file_private *file_priv;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_gem_object *obj;
+ struct i915_ctx_hang_stats hang_stats;
+};
+
+enum no_fbc_reason {
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
+ FBC_MODULE_PARAM,
+};
+
+enum intel_pch {
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+ PCH_LPT, /* Lynxpoint PCH */
+ PCH_NOP,
+};
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
+
+struct intel_fbdev;
+struct intel_fbc_work;
+
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+ bool force_bit;
+ u32 reg0;
+ u32 gpio_reg;
+ struct drm_i915_private *dev_priv;
+};
+
+typedef struct drm_i915_bridge_dev {
+ ldi_ident_t ldi_id;
+ ldi_handle_t bridge_dev_hdl;
+} drm_i915_bridge_dev_t;
+
+struct i915_suspend_saved_registers {
+ u8 saveLBB;
+ u32 saveDSPACNTR;
+ u32 saveDSPBCNTR;
+ u32 saveDSPARB;
+ u32 saveHWS;
+ u32 savePIPEACONF;
+ u32 savePIPEBCONF;
+ u32 savePIPEASRC;
+ u32 savePIPEBSRC;
+ u32 saveFPA0;
+ u32 saveFPA1;
+ u32 saveDPLL_A;
+ u32 saveDPLL_A_MD;
+ u32 saveHTOTAL_A;
+ u32 saveHBLANK_A;
+ u32 saveHSYNC_A;
+ u32 saveVTOTAL_A;
+ u32 saveVBLANK_A;
+ u32 saveVSYNC_A;
+ u32 saveBCLRPAT_A;
+ u32 saveTRANSACONF;
+ u32 saveTRANS_HTOTAL_A;
+ u32 saveTRANS_HBLANK_A;
+ u32 saveTRANS_HSYNC_A;
+ u32 saveTRANS_VTOTAL_A;
+ u32 saveTRANS_VBLANK_A;
+ u32 saveTRANS_VSYNC_A;
+ u32 savePIPEASTAT;
+ u32 saveDSPASTRIDE;
+ u32 saveDSPASIZE;
+ u32 saveDSPAPOS;
+ u32 saveDSPAADDR;
+ u32 saveDSPASURF;
+ u32 saveDSPATILEOFF;
+ u32 savePFIT_PGM_RATIOS;
+ u32 saveBLC_HIST_CTL;
+ u32 saveBLC_PWM_CTL;
+ u32 saveBLC_PWM_CTL2;
+ u32 saveBLC_CPU_PWM_CTL;
+ u32 saveBLC_CPU_PWM_CTL2;
+ u32 saveFPB0;
+ u32 saveFPB1;
+ u32 saveDPLL_B;
+ u32 saveDPLL_B_MD;
+ u32 saveHTOTAL_B;
+ u32 saveHBLANK_B;
+ u32 saveHSYNC_B;
+ u32 saveVTOTAL_B;
+ u32 saveVBLANK_B;
+ u32 saveVSYNC_B;
+ u32 saveBCLRPAT_B;
+ u32 saveTRANSBCONF;
+ u32 saveTRANS_HTOTAL_B;
+ u32 saveTRANS_HBLANK_B;
+ u32 saveTRANS_HSYNC_B;
+ u32 saveTRANS_VTOTAL_B;
+ u32 saveTRANS_VBLANK_B;
+ u32 saveTRANS_VSYNC_B;
+ u32 savePIPEBSTAT;
+ u32 saveDSPBSTRIDE;
+ u32 saveDSPBSIZE;
+ u32 saveDSPBPOS;
+ u32 saveDSPBADDR;
+ u32 saveDSPBSURF;
+ u32 saveDSPBTILEOFF;
+ u32 saveVGA0;
+ u32 saveVGA1;
+ u32 saveVGA_PD;
+ u32 saveVGACNTRL;
+ u32 saveADPA;
+ u32 saveLVDS;
+ u32 savePP_ON_DELAYS;
+ u32 savePP_OFF_DELAYS;
+ u32 saveDVOA;
+ u32 saveDVOB;
+ u32 saveDVOC;
+ u32 savePP_ON;
+ u32 savePP_OFF;
+ u32 savePP_CONTROL;
+ u32 savePP_DIVISOR;
+ u32 savePFIT_CONTROL;
+ u32 save_palette_a[256];
+ u32 save_palette_b[256];
+ u32 saveDPFC_CB_BASE;
+ u32 saveFBC_CFB_BASE;
+ u32 saveFBC_LL_BASE;
+ u32 saveFBC_CONTROL;
+ u32 saveFBC_CONTROL2;
+ u32 saveIER;
+ u32 saveIIR;
+ u32 saveIMR;
+ u32 saveDEIER;
+ u32 saveDEIMR;
+ u32 saveGTIER;
+ u32 saveGTIMR;
+ u32 saveFDI_RXA_IMR;
+ u32 saveFDI_RXB_IMR;
+ u32 saveCACHE_MODE_0;
+ u32 saveMI_ARB_STATE;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+ u32 saveSWF2[3];
+ u8 saveMSR;
+ u8 saveSR[8];
+ u8 saveGR[25];
+ u8 saveAR_INDEX;
+ u8 saveAR[21];
+ u8 saveDACMASK;
+ u8 saveCR[37];
+ uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+ u32 saveCURACNTR;
+ u32 saveCURAPOS;
+ u32 saveCURABASE;
+ u32 saveCURBCNTR;
+ u32 saveCURBPOS;
+ u32 saveCURBBASE;
+ u32 saveCURSIZE;
+ u32 saveDP_B;
+ u32 saveDP_C;
+ u32 saveDP_D;
+ u32 savePIPEA_GMCH_DATA_M;
+ u32 savePIPEB_GMCH_DATA_M;
+ u32 savePIPEA_GMCH_DATA_N;
+ u32 savePIPEB_GMCH_DATA_N;
+ u32 savePIPEA_DP_LINK_M;
+ u32 savePIPEB_DP_LINK_M;
+ u32 savePIPEA_DP_LINK_N;
+ u32 savePIPEB_DP_LINK_N;
+ u32 saveFDI_RXA_CTL;
+ u32 saveFDI_TXA_CTL;
+ u32 saveFDI_RXB_CTL;
+ u32 saveFDI_TXB_CTL;
+ u32 savePFA_CTL_1;
+ u32 savePFB_CTL_1;
+ u32 savePFA_WIN_SZ;
+ u32 savePFB_WIN_SZ;
+ u32 savePFA_WIN_POS;
+ u32 savePFB_WIN_POS;
+ u32 savePCH_DREF_CONTROL;
+ u32 saveDISP_ARB_CTL;
+ u32 savePIPEA_DATA_M1;
+ u32 savePIPEA_DATA_N1;
+ u32 savePIPEA_LINK_M1;
+ u32 savePIPEA_LINK_N1;
+ u32 savePIPEB_DATA_M1;
+ u32 savePIPEB_DATA_N1;
+ u32 savePIPEB_LINK_M1;
+ u32 savePIPEB_LINK_N1;
+ u32 saveMCHBAR_RENDER_STANDBY;
+ u32 savePCH_PORT_HOTPLUG;
+ u32 pgtbl_ctl;
+};
+
+
+struct batch_info_list {
+ struct list_head head;
+ uint32_t num;
+ uint32_t seqno;
+ caddr_t *obj_list;
+};
+
+struct intel_gen6_power_mgmt {
+ struct work_struct work;
+ struct work_struct vlv_work;
+ struct timer_list vlv_timer;
+ u32 pm_iir;
+ /* lock - irqsave spinlock that protects the work_struct and
+ * pm_iir. */
+ spinlock_t lock;
+
+ /* The below variables and all the rps hw state are protected by
+ * dev->struct_mutex. */
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 rpe_delay;
+ u8 hw_max;
+
+ struct work_struct delayed_resume_work;
+ struct timer_list delayed_resume_timer;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested.
+ */
+ struct mutex hw_lock;
+};
+
+/* defined intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+struct intel_ilk_power_mgmt {
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+ u8 fmax;
+ u8 fstart;
+
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ clock_t last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c_m;
+ int r_t;
+
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
+};
+
+/* Power well structure for haswell */
+struct i915_power_well {
+ struct drm_device *device;
+ spinlock_t lock;
+ /* power well enable/disable usage count */
+ int count;
+ int i915_request;
+};
+
+struct i915_dri1_state {
+ unsigned allow_batchbuffer : 1;
+ drm_local_map_t gfx_hws_cpu_addr;
+
+ unsigned int cpp;
+ int back_offset;
+ int front_offset;
+ int current_page;
+ int page_flipping;
+
+ uint32_t counter;
+};
+
+struct intel_l3_parity {
+ u32 *remap_info;
+ struct work_struct error_work;
+};
+
+struct i915_gem_mm {
+ /** Memory allocator for GTT stolen memory */
+ struct drm_mm stolen;
+ /** Memory allocator for GTT */
+ struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head bound_list;
+ /**
+ * List of objects which are not bound to the GTT (thus
+ * are idle and not used by the GPU) but still have
+ * (presumably uncached) pages still attached.
+ */
+ struct list_head unbound_list;
+
+ /** Usable portion of the GTT for GEM */
+ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ int gtt_mtrr;
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_hw_ppgtt *aliasing_ppgtt;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct work_struct retire_work;
+ struct timer_list retire_timer;
+
+ /**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+
+ /* storage for physical objects */
+ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+ /* accounting, useful for userland debugging */
+ size_t object_memory;
+ u32 object_count;
+};
+
+struct drm_i915_error_state_buf {
+ unsigned bytes;
+ unsigned size;
+ int err;
+ u8 *buf;
+ uint64_t start;
+ uint64_t pos;
+};
+
+struct i915_gpu_error {
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+
+ /* For reset and error_state handling. */
+ spinlock_t lock;
+ /* Protected by the above dev->gpu_error.lock. */
+ struct drm_i915_error_state *first_error;
+ struct work_struct work;
+
+ unsigned long last_reset;
+
+ /**
+ * State variable and reset counter controlling the reset flow
+ *
+ * Upper bits are for the reset counter. This counter is used by the
+ * wait_seqno code to notice, free of races, that a reset event
+ * happened and that it needs to restart the entire ioctl (since most
+ * likely the seqno it waited for won't ever signal anytime soon).
+ *
+ * This is important for lock-free wait paths, where no contended lock
+ * naturally enforces the correct ordering between the bail-out of the
+ * waiter and the gpu reset work code.
+ *
+ * Lowest bit controls the reset state machine: Set means a reset is in
+ * progress. This state will (presuming we don't have any bugs) decay
+ * into either unset (successful reset) or the special WEDGED value (hw
+ * terminally sour). All waiters on the reset_queue will be woken when
+ * that happens.
+ */
+ atomic_t reset_counter;
+
+ /**
+ * Special values/flags for reset_counter
+ *
+ * Note that the code relies on
+ * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+ * being true.
+ */
+#define I915_RESET_IN_PROGRESS_FLAG 1
+#define I915_WEDGED 0xffffffff
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for dev_priv->mm.wedged to settle.
+ */
+ wait_queue_head_t reset_queue;
+
+ /* For gpu hang simulation. */
+ unsigned int stop_rings;
+};
+
+enum modeset_restore {
+ MODESET_ON_LID_OPEN,
+ MODESET_DONE,
+ MODESET_SUSPENDED,
+};
+
+struct intel_vbt_data {
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+ int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+
+ /* eDP */
+ int edp_rate;
+ int edp_lanes;
+ int edp_preemphasis;
+ int edp_vswing;
+ bool edp_initialized;
+ bool edp_support;
+ int edp_bpp;
+ struct edp_power_seq edp_pps;
+
+ int crt_ddc_pin;
+
+ int child_dev_num;
+ struct child_device_config *child_dev;
+};
+
+typedef struct drm_i915_private {
+ struct drm_device *dev;
+
+ const struct intel_device_info *info;
+
+ int relative_constants_mode;
+
+ drm_local_map_t *regs;
+ struct drm_i915_gt_funcs gt;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ spinlock_t gt_lock;
+
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct mutex gmbus_mutex;
+
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
+ wait_queue_head_t gmbus_wait_queue;
+
+ struct drm_i915_bridge_dev bridge_dev;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
+ uint32_t last_seqno, next_seqno;
+
+ drm_dma_handle_t *status_page_dmah;
+ uint32_t counter;
+
+ atomic_t irq_received;
+ u32 trace_irq_seqno;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
+ /** Cached value of IMR to avoid reads in updating the bitfield */
+ u32 irq_mask;
+ u32 gt_irq_mask;
+
+ struct work_struct hotplug_work;
+ bool enable_hotplug_processing;
+ struct {
+ unsigned long hpd_last_jiffies;
+ int hpd_cnt;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } hpd_mark;
+ } hpd_stats[HPD_NUM_PINS];
+ u32 hpd_event_bits;
+ struct timer_list hotplug_reenable_timer;
+
+ int num_plane;
+
+ unsigned long cfb_size;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
+ int cfb_y;
+ struct intel_fbc_work *fbc_work;
+ struct timer_list fbc_timer;
+
+ struct intel_opregion opregion;
+ struct intel_vbt_data vbt;
+
+ /* overlay */
+ struct intel_overlay *overlay;
+ unsigned int sprite_scaling_enabled;
+
+ /* backlight */
+ struct {
+ int level;
+ bool enabled;
+ spinlock_t lock; /* bl registers and the above bl fields */
+ struct backlight_device *device;
+ } backlight;
+
+ /* LVDS info */
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+ bool no_aux_handshake;
+
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+
+ struct workqueue_struct *wq;
+ struct workqueue_struct *other_wq;
+
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+ unsigned short pch_id;
+
+ unsigned long quirks;
+ bool vt_holding;
+ bool isX;
+ bool gfx_state_saved;
+
+ /* Register state */
+ enum modeset_restore modeset_restore;
+ struct mutex modeset_restore_lock;
+
+ struct i915_gtt gtt;
+
+ struct i915_gem_mm mm;
+
+ /* Kernel Modesetting */
+
+ struct sdvo_device_mapping sdvo_mappings[2];
+
+ struct drm_crtc *plane_to_crtc_mapping[3];
+ struct drm_crtc *pipe_to_crtc_mapping[3];
+ wait_queue_head_t pending_flip_queue;
+
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
+
+ /* Reclocking support */
+ bool render_reclock_avail;
+ bool lvds_downclock_avail;
+ /* indicates the reduced downclock for LVDS*/
+ int lvds_downclock;
+ u16 orig_clock;
+
+ bool mchbar_need_disable;
+
+ struct intel_l3_parity l3_parity;
+
+ /* gen6+ rps state */
+ struct intel_gen6_power_mgmt rps;
+
+ /* ilk-only ips/rps state. Everything in here is protected by the global
+ * mchdev_lock in intel_pm.c */
+ struct intel_ilk_power_mgmt ips;
+
+ /* Haswell power well */
+ struct i915_power_well power_well;
+
+ enum no_fbc_reason no_fbc_reason;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
+
+ struct timer_list gpu_top_timer;
+ struct i915_gpu_error gpu_error;
+ int gpu_hang;
+ struct drm_i915_gem_object *vlv_pctx;
+
+ /* list of fbdevs registered on this device */
+ struct intel_fbdev *fbdev;
+
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+
+ bool hw_contexts_disabled;
+ uint32_t hw_context_size;
+
+ u32 fdi_rx_config;
+
+ struct i915_suspend_saved_registers regfile;
+
+ struct drm_i915_gem_object *fbcon_obj;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct i915_dri1_state dri1;
+ struct list_head batch_list;
+} drm_i915_private_t;
+
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+
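+/*
+ * Illustrative usage sketch (not part of the imported source); assumes
+ * dev_priv points at a live drm_i915_private and that intel_ring_idle()
+ * from intel_ringbuffer.h is available:
+ *
+ *	struct intel_ring_buffer *ring;
+ *	int i;
+ *
+ *	for_each_ring(ring, dev_priv, i)
+ *		(void) intel_ring_idle(ring);
+ */
+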
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be an
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
+};
+
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ const struct drm_i915_gem_object_ops *ops;
+
+ /** Current space allocated to this object in the GTT, if any. */
+ struct drm_mm_node *gtt_space;
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ struct list_head global_list;
+
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head ring_list;
+ struct list_head mm_list;
+ /** This object's place on eviction list */
+ struct list_head exec_list;
+
+ /**
+ * This is set if the object is on the active or flushing lists
+ * (has pending rendering), and is not set if it's on inactive (ready
+ * to be unbound).
+ */
+ unsigned int active;
+
+ /**
+ * This is set if the object has been written to since last bound
+ * to the GTT
+ */
+ unsigned int dirty;
+
+ /**
+ * Fence register bits (if any) for this object. Will be set
+ * as needed when mapped into the GTT.
+ * Protected by dev->struct_mutex.
+ */
+ signed int fence_reg;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv;
+
+ /**
+ * Current tiling mode for the object.
+ */
+ unsigned int tiling_mode : 2;
+ /**
+ * Whether the tiling parameters for the currently associated fence
+ * register have changed. Note that for the purposes of tracking
+ * tiling changes we also treat the unfenced register, the register
+ * slot that the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ unsigned int fence_dirty:1;
+
+ /** How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, pin_ioctl
+ * (via user_pin_count), execbuffer (objects are not allowed multiple
+ * times for the same batchbuffer), and the framebuffer code. When
+ * switching/pageflipping, the framebuffer code has at most two buffers
+ * pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits. */
+ unsigned int pin_count;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+ /**
+ * Is the object at the current location in the gtt mappable and
+ * fenceable? Used to avoid costly recalculations.
+ */
+ unsigned int map_and_fenceable;
+ int agp_mem;
+ /**
+ * Whether the current gtt mapping needs to be mappable (and isn't just
+ * mappable by accident). Track pin and fault separately for a more
+ * accurate mappable working set.
+ */
+ unsigned int fault_mappable;
+ unsigned int pin_mappable;
+
+ /*
+ * Is the GPU currently using a fence to access this buffer?
+ */
+ unsigned int pending_fenced_gpu_access;
+ unsigned int fenced_gpu_access;
+
+ unsigned int cache_level;
+
+ unsigned int has_aliasing_ppgtt_mapping;
+ unsigned int has_global_gtt_mapping;
+ unsigned int has_dma_mapping;
+
+ caddr_t *page_list;
+ int pages_pin_count;
+
+ /**
+ * DMAR support
+ */
+ struct scatterlist *sg_list;
+ int num_sg;
+
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ unsigned long exec_handle;
+ struct drm_i915_gem_exec_object2 *exec_entry;
+
+ /**
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
+ */
+ uint32_t gtt_offset;
+
+ struct intel_ring_buffer *ring;
+
+ /**
+ * Fake offset for use by mmap(2)
+ */
+ uint64_t mmap_offset;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_read_seqno;
+ uint32_t last_write_seqno;
+ /** Breadcrumb of last fenced GPU access to the buffer. */
+ uint32_t last_fenced_seqno;
+
+ /** Current tiling mode for the object. */
+ uint32_t stride;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ /** User space pin count and filp owning the pin */
+ uint32_t user_pin_count;
+ struct drm_file *pin_filp;
+
+ /** for phy allocated objects */
+ struct drm_i915_gem_phys_object *phys_obj;
+
+ /** OSOL: for cursor objects */
+ u8 is_cursor;
+};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
+
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
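+/*
+ * Example (illustrative): given a struct drm_gem_object *gobj returned
+ * by drm_gem_object_lookup(), to_intel_bo(gobj) recovers the embedding
+ * struct drm_i915_gem_object, as the pread/pwrite ioctls in i915_gem.c
+ * do when looking up objects by handle.
+ */
+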
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+ /** On which ring this request was generated */
+ struct intel_ring_buffer *ring;
+
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+
+ /** Position in the ringbuffer of the start of the request */
+ u32 head;
+
+ /** Position in the ringbuffer of the end of the request */
+ u32 tail;
+
+ /** Context related to this request */
+ struct i915_hw_context *ctx;
+
+ /** Batch buffer related to this request if any */
+ struct drm_i915_gem_object *batch_obj;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** global list entry for this request */
+ struct list_head list;
+
+ struct drm_i915_file_private *file_priv;
+ /** file_priv list entry for this request */
+ struct list_head client_list;
+};
+
+struct drm_i915_file_private {
+ struct {
+ spinlock_t lock;
+ struct list_head request_list;
+ } mm;
+ /** 1 = open, 0 = closed */
+ int status;
+ struct idr context_idr;
+
+ struct i915_ctx_hang_stats hang_stats;
+};
+
+#if defined(__sun)
+/* These definitions conflict with those in x86_archext.h */
+#undef IS_IVYBRIDGE
+#undef IS_HASWELL
+#endif
+
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
+ (dev)->pci_device == 0x0152 || \
+ (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0A00)
+
+/*
+ * The genX designation typically refers to the render engine, so render
+ * capability related checks should use IS_GEN, while display and other checks
+ * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
+ * chips, etc.).
+ */
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
+
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_IPS(dev) (IS_ULT(dev))
+
+#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+
+#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
+#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
+#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+
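+/*
+ * A sketch (illustrative, mirroring how intel_detect_pch() typically
+ * classifies the PCH from the ISA bridge device ID):
+ *
+ *	if ((pch_id & INTEL_PCH_DEVICE_ID_MASK) ==
+ *	    INTEL_PCH_CPT_DEVICE_ID_TYPE)
+ *		dev_priv->pch_type = PCH_CPT;
+ */
+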
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+
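+/*
+ * Illustrative: gen6+ RPS frequency values are in 50 MHz units, so an
+ * RPS value of 14 corresponds to 14 * GT_FREQUENCY_MULTIPLIER = 700 MHz.
+ */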
+
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage. This
+ * stage is entered automatically when the GPU is idle when RC6 support is
+ * enabled, and as soon as a new workload arises the GPU wakes up
+ * automatically as well.
+ *
+ * There are different RC6 modes available in Intel GPUs, which differ in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
+ * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
+ * according to the GPU, BIOS, chipset and platform. RC6 is usually the
+ * safest one and the one which brings the most power savings; deeper
+ * states save more power, but require higher latency to switch to and
+ * wake up.
+ */
+#define INTEL_RC6_ENABLE (1<<0)
+#define INTEL_RC6p_ENABLE (1<<1)
+#define INTEL_RC6pp_ENABLE (1<<2)
+
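+/*
+ * For example (illustrative): an i915_enable_rc6 value of
+ * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) permits plain and deep RC6
+ * while keeping the hardware out of the deepest RC6pp state.
+ */
+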
+extern struct drm_ioctl_desc i915_ioctls[];
+extern int i915_max_ioctl;
+extern unsigned int i915_fbpercrtc;
+extern int i915_panel_ignore_lid;
+extern unsigned int i915_powersave;
+extern int i915_semaphores;
+extern unsigned int i915_lvds_downclock;
+extern int i915_lvds_channel_mode;
+extern int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
+extern int i915_enable_rc6;
+extern int i915_enable_fbc;
+extern bool i915_enable_hangcheck;
+extern bool i915_try_reset;
+extern int i915_enable_ppgtt;
+extern int i915_disable_power_well;
+extern int i915_enable_ips;
+
+extern int i915_suspend(struct drm_device *dev);
+extern int i915_resume(struct drm_device *dev);
+extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
+extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
+extern void i915_driver_entervt(struct drm_device *dev);
+extern void i915_driver_leavevt(struct drm_device *dev);
+extern void i915_driver_agp_support_detect(struct drm_device *dev, unsigned long flags);
+
+/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
+extern void i915_kernel_lost_context(struct drm_device * dev);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *);
+extern int i915_driver_firstopen(struct drm_device *dev);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#endif
+extern int i915_emit_box(struct drm_device *dev,
+ struct drm_clip_rect *box,
+ int DR1, int DR4);
+extern int intel_gpu_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_device *dev);
+
+extern void intel_console_resume(struct work_struct *work);
+
+extern void i915_emit_mi_flush(drm_device_t *dev, uint32_t flush);
+extern int i915_bridge_dev_read_config_word(struct drm_i915_bridge_dev *bridge_dev, int where, u16 *val);
+extern int i915_bridge_dev_write_config_word(struct drm_i915_bridge_dev *bridge_dev, int where, u16 val);
+
+extern void gpu_top_handler(void *data);
+/* i915_irq.c */
+void i915_hangcheck_elapsed(void* data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
+
+extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
+extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);
+
+void i915_error_state_free(struct kref *error_ref);
+
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+#else
+#define i915_destroy_error_state(x)
+#endif
+
+/* i915_gem.c */
+int i915_gem_init_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_create_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_pread_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_pwrite_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_mmap_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_mmap_gtt_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_set_domain_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_sw_finish_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_execbuffer(DRM_IOCTL_ARGS);
+int i915_gem_execbuffer2(DRM_IOCTL_ARGS);
+int i915_gem_pin_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_unpin_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_busy_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_get_caching_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_set_caching_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_throttle_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_madvise_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_entervt_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_leavevt_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_set_tiling(DRM_IOCTL_ARGS);
+int i915_gem_get_tiling(DRM_IOCTL_ARGS);
+int i915_gem_get_aperture_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_wait_ioctl(DRM_IOCTL_ARGS);
+void i915_gem_load(struct drm_device *dev);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable,
+ bool nonblocking);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj, uint32_t type);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->page_list == NULL);
+ obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages_pin_count == 0);
+ obj->pages_pin_count--;
+}
+
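+/*
+ * Illustrative pairing (a sketch, not from the source): callers bracket
+ * direct use of the backing pages, e.g.
+ *
+ *	i915_gem_object_pin_pages(obj);
+ *	... access obj->page_list ...
+ *	i915_gem_object_unpin_pages(obj);
+ */
+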
+int i915_mutex_lock_interruptible(struct drm_device *dev);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring);
+
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
+
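+/*
+ * Worked example (illustrative): the signed comparison handles seqno
+ * wraparound, so i915_seqno_passed(0x00000002, 0xfffffffe) is true
+ * because (int32_t)(0x00000002 - 0xfffffffe) == 4, which is >= 0.
+ */
+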
+int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+static inline bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ return true;
+ } else
+ return false;
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+int i915_gem_check_wedge(struct i915_gpu_error *error,
+ bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+ return (atomic_read(&error->reset_counter)
+ & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter) == I915_WEDGED;
+}
+
+void i915_gem_reset(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int i915_gem_init(struct drm_device *dev);
+int i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_l3_remap(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gpu_idle(struct drm_device *dev);
+int i915_gem_idle(struct drm_device *dev, uint32_t type);
+int __i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_object *batch_obj,
+ u32 *seqno);
+#define i915_add_request(ring, seqno) \
+ __i915_add_request(ring, NULL, NULL, seqno)
+int i915_wait_seqno(struct intel_ring_buffer *ring,
+ uint32_t seqno);
+void i915_gem_fault(struct drm_gem_object *obj);
+int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ struct intel_ring_buffer *pipelined);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ int id,
+ int align);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
+void i915_gem_free_all_phys_object(struct drm_device *dev);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced);
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_gem_restore_fences(struct drm_device *dev);
+
+/* i915_gem_context.c */
+void i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct intel_ring_buffer *ring,
+ struct drm_file *file, int to_id);
+void i915_gem_context_free(struct kref *ctx_ref);
+void i915_gem_context_reference(struct i915_hw_context *ctx);
+void i915_gem_context_unreference(struct i915_hw_context *ctx);
+
+struct i915_ctx_hang_stats *
+i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ u32 id);
+int i915_gem_context_create_ioctl(DRM_IOCTL_ARGS);
+int i915_gem_context_destroy_ioctl(DRM_IOCTL_ARGS);
+
+/* i915_gem_gtt.c */
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj, uint32_t type);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+ unsigned long mappable_end, unsigned long end);
+int setup_scratch_page(struct drm_device *dev);
+void teardown_scratch_page(struct drm_device *dev);
+int i915_gem_gtt_init(struct drm_device *dev);
+void intel_rw_gtt(struct drm_device *dev, size_t size,
+ uint32_t gtt_offset, void *gttp, uint32_t type);
+void i915_clean_gtt(struct drm_device *dev, size_t offset);
+void i915_gem_chipset_flush(struct drm_device *dev);
+
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment,
+ unsigned cache_level,
+ bool mappable,
+ bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+ u32 stolen_offset,
+ u32 gtt_offset,
+ u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
+
+/* i915_gem_tiling.c */
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+ return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ obj->tiling_mode != I915_TILING_NONE;
+}
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
+/* i915_gem_debug.c */
+void i915_gem_command_decode(uint32_t *data, int count,
+ uint32_t hw_offset, struct drm_device *dev);
+void register_dump(struct drm_device *dev);
+void gtt_dump(struct drm_device *dev);
+void ring_dump(struct drm_device *dev, struct intel_ring_buffer *ring);
+
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
+#else
+#define i915_verify_lists(dev) 0
+#endif
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+ int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+
+/* i915_suspend.c */
+extern int i915_save_state(struct drm_device *dev);
+extern int i915_restore_state(struct drm_device *dev);
+
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
+
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+static inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+ return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+ struct drm_i915_private *dev_priv, unsigned port);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+ return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+extern void intel_gmbus_hdmi_set_adapter(struct i2c_adapter *adapter);
+
+/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
+extern void intel_modeset_suspend_hw(struct drm_device *dev);
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_gem_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void intel_init_pch_refclk(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
+extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
+extern void intel_increase_pllclock(struct drm_crtc *crtc);
+
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(DRM_IOCTL_ARGS);
+
+/* overlay */
+#ifdef CONFIG_DEBUG_FS
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error);
+#endif
+
+/* On the SNB platform, before reading ring registers the forcewake bit
+ * must be set to prevent the GT core from powering down and stale values
+ * from being returned.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
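+/*
+ * Illustrative pairing (a sketch; SOME_GT_REG is a hypothetical register
+ * offset, not from the source):
+ *
+ *	gen6_gt_force_wake_get(dev_priv);
+ *	u32 val = I915_READ(SOME_GT_REG);
+ *	gen6_gt_force_wake_put(dev_priv);
+ */
+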
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
+/* intel_sideband.c */
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
+void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+
+int vlv_gpu_freq(int ddr_freq, int val);
+int vlv_freq_opcode(int ddr_freq, int val);
+
+#define __i915_read(x) \
+u ## x i915_read ## x(struct drm_i915_private *dev_priv, u32 reg);
+
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
+#undef __i915_read
+
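+/*
+ * For reference, __i915_read(32) above expands to the declaration:
+ *
+ *	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg);
+ */
+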
+#define __i915_write(x) \
+void i915_write ## x(struct drm_i915_private *dev_priv, u32 reg, \
+ u ## x val);
+
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
+#undef __i915_write
+
+#define I915_READ(reg) i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (u32)(val))
+#define I915_READ_NOTRACE(reg) DRM_READ32(dev_priv->regs, (reg))
+#define I915_WRITE_NOTRACE(reg, val) DRM_WRITE32(dev_priv->regs, (reg), (val))
+#define I915_READ16(reg) i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg,val) i915_write16(dev_priv, (reg), (u16)(val))
+#define I915_READ16_NOTRACE(reg) DRM_READ16(dev_priv->regs, (reg))
+#define I915_WRITE16_NOTRACE(reg, val) DRM_WRITE16(dev_priv->regs, (reg), (val))
+#define I915_READ8(reg) i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg,val) i915_write8(dev_priv, (reg), (u8)(val))
+#define I915_WRITE64(reg,val) i915_write64(dev_priv, (reg), (u64)(val))
+#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+#define POSTING_READ(reg) (void)DRM_READ32(dev_priv->regs, (reg))
+#define POSTING_READ16(reg) (void)DRM_READ16(dev_priv->regs, (reg))
+#define POSTING_READ8(reg) (void)DRM_READ8(dev_priv->regs, (reg))
+
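+/*
+ * Illustrative read-modify-write sketch using the accessors above, where
+ * SOME_REG is a hypothetical register offset:
+ *
+ *	u32 val = I915_READ(SOME_REG);
+ *	I915_WRITE(SOME_REG, val | 0x1);
+ *	POSTING_READ(SOME_REG);	(flushes the posted write)
+ */
+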
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
+#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
+#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
+#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
+#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+ if (HAS_PCH_SPLIT(dev))
+ return CPU_VGACNTRL;
+ else if (IS_VALLEYVIEW(dev))
+ return VLV_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
+#endif /* _I915_DRV_H_ */
diff --git a/usr/src/uts/intel/io/i915/i915_gem.c b/usr/src/uts/intel/io/i915/i915_gem.c
new file mode 100644
index 0000000..de59d85
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_gem.c
@@ -0,0 +1,3988 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_mm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file);
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj);
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable);
+
+int i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.object_count++;
+ dev_priv->mm.object_memory += size;
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+ size_t size)
+{
+ dev_priv->mm.object_count--;
+ dev_priv->mm.object_memory -= size;
+}
+
+static int
+i915_gem_wait_for_error(struct i915_gpu_error *error)
+{
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+ i915_terminally_wedged(error))
+ if (EXIT_COND)
+ return 0;
+
+ /*
+ * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+ * userspace. If it takes that long something really bad is going on and
+ * we should simply try to bail out and fail as gracefully as possible.
+ */
+ if (wait_for(EXIT_COND, 10*1000)) {
+ DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+ return -EIO;
+ }
+#undef EXIT_COND
+
+ return 0;
+}
+
+int
+i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+ if (ret)
+ return ret;
+
+ /* fix me mutex_lock_interruptible */
+ mutex_lock(&dev->struct_mutex);
+
+ WARN_ON(i915_verify_lists(dev));
+ return 0;
+}
+
+static inline bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
+{
+ return obj->gtt_space && !obj->active;
+}
+
+int
+/* LINTED */
+i915_gem_init_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_init *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ /* GEM with user mode setting was never supported on ilk and later. */
+ if (INTEL_INFO(dev)->gen >= 5)
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+ args->gtt_end);
+ dev_priv->gtt.mappable_end = args->gtt_end;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
+/* LINTED */
+i915_gem_get_aperture_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_get_aperture *args = data;
+ struct drm_i915_gem_object *obj;
+ size_t pinned;
+
+ pinned = 0;
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.bound_list, global_list)
+ if (obj->pin_count)
+ pinned += obj->gtt_space->size;
+ mutex_unlock(&dev->struct_mutex);
+
+ args->aper_size = dev_priv->gtt.total;
+ args->aper_available_size = args->aper_size - pinned;
+
+ return 0;
+}
+
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+ return NULL;
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+}
+
+static int
+i915_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ uint32_t *handle_p)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+ u32 handle;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ /* Allocate the new object */
+ obj = i915_gem_alloc_object(dev, size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
+ if (ret) {
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
+ i915_gem_object_free(obj);
+ return ret;
+ }
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference(&obj->base);
+
+ *handle_p = handle;
+ return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
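+ /*
+ * e.g. (illustrative) a 1024x768 buffer at 32 bpp: pitch =
+ * ALIGN(1024 * 4, 64) = 4096 bytes, size = 4096 * 768 = 3 MB.
+ */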
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
+int i915_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+/* LINTED */
+i915_gem_create_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_create *args = data;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
+static inline void
+slow_shmem_bit17_copy(caddr_t gpu_page,
+ int gpu_offset,
+ uint32_t *cpu_page,
+ int cpu_offset,
+ int length,
+ int is_read)
+{
+ int ret = 0; /* stays 0 if the copy loop below never runs */
+ /* Use the unswizzled path if this page isn't affected. */
+ if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+ if (is_read)
+ ret = DRM_COPY_TO_USER(cpu_page + cpu_offset,
+ gpu_page + gpu_offset, length);
+ else
+ ret = DRM_COPY_FROM_USER(gpu_page + gpu_offset,
+ cpu_page + cpu_offset, length);
+ if (ret)
+ DRM_ERROR("slow_shmem_bit17_copy unswizzled path failed, ret = %d", ret);
+ return;
+ }
+
+ /* Copy the data, XORing A6 with A17 (1). The user already knows he's
+ * XORing with the other bits (A9 for Y, A9 and A10 for X)
+ */
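+ /*
+ * Worked example (illustrative): gpu_offset ^ 64 flips address bit 6,
+ * so within each 128-byte span the two 64-byte halves swap (offset 0
+ * is copied from 64 and offset 64 from 0).
+ */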
+ while (length > 0) {
+ int cacheline_end = ALIGN(gpu_offset + 1, 64);
+ int this_length = min(cacheline_end - gpu_offset, length);
+ int swizzled_gpu_offset = gpu_offset ^ 64;
+
+ if (is_read) {
+ ret = DRM_COPY_TO_USER(cpu_page + cpu_offset,
+ gpu_page + swizzled_gpu_offset,
+ this_length);
+ } else {
+ ret = DRM_COPY_FROM_USER(gpu_page + swizzled_gpu_offset,
+ cpu_page + cpu_offset,
+ this_length);
+ }
+ cpu_offset += this_length;
+ gpu_offset += this_length;
+ length -= this_length;
+ }
+ if (ret)
+ DRM_ERROR("slow_shmem_bit17_copy failed, ret = %d", ret);
+
+}
+
+int
+/* LINTED */
+i915_gem_shmem_pread(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file_priv)
+{
+ ssize_t remain, page_length;
+ uint32_t offset;
+ uint64_t first_data_page;
+ int shmem_page_index, shmem_page_offset;
+ int data_page_index, data_page_offset;
+ int ret = 0;
+ uint64_t data_ptr = args->data_ptr;
+ int do_bit17_swizzling;
+ int needs_clflush = 0;
+ uint32_t *user_data = (uint32_t *)(uintptr_t)args->data_ptr;
+
+ remain = args->size;
+
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+ /* If we're not in the cpu read domain, set ourselves into the gtt
+ * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush = 1;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
+ first_data_page = data_ptr / PAGE_SIZE;
+
+ offset = args->offset;
+
+ if (needs_clflush)
+ i915_gem_clflush_object(obj);
+
+ if (do_bit17_swizzling) {
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * shmem_page_index = page number within shmem file
+ * shmem_page_offset = offset within page in shmem file
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset within the data_page_index page.
+ * page_length = bytes to copy for this page
+ */
+ shmem_page_index = offset / DRM_PAGE_SIZE;
+ shmem_page_offset = offset & ~DRM_PAGE_MASK;
+ data_page_index = data_ptr / DRM_PAGE_SIZE - first_data_page;
+ data_page_offset = data_ptr & ~DRM_PAGE_MASK;
+
+ page_length = remain;
+ if ((shmem_page_offset + page_length) > DRM_PAGE_SIZE)
+ page_length = PAGE_SIZE - shmem_page_offset;
+ if ((data_page_offset + page_length) > DRM_PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
+
+ slow_shmem_bit17_copy(obj->page_list[shmem_page_index],
+ shmem_page_offset,
+ user_data + data_page_index * DRM_PAGE_SIZE,
+ data_page_offset,
+ page_length,
+ 1);
+
+ remain -= page_length;
+ data_ptr += page_length;
+ offset += page_length;
+ }
+ } else {
+ ret = DRM_COPY_TO_USER((caddr_t)user_data,
+ obj->base.kaddr + args->offset,
+ args->size);
+ if (ret)
+ DRM_ERROR("shmem_pread_copy failed, ret = %d", ret);
+ }
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+/* LINTED */
+i915_gem_pread_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_pread *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+
+ if (args->size == 0)
+ return 0;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ /* Bounds check source. */
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ ret = i915_gem_shmem_pread(dev, obj, args, file);
+
+ TRACE_GEM_OBJ_HISTORY(obj, "pread");
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ /* LINTED */
+ struct drm_file *file_priv)
+{
+ uint32_t *user_data;
+ int ret = 0;
+ ret = i915_gem_object_pin(obj, 0, true, true);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
+ user_data = (uint32_t *)(uintptr_t)args->data_ptr;
+
+ ret = DRM_COPY_FROM_USER(obj->base.kaddr + args->offset, user_data, args->size);
+ if (ret) {
+ DRM_ERROR("copy_from_user failed, ret = %d", ret);
+ goto out_unpin; /* drop the pin taken above rather than leaking it */
+ }
+
+out_unpin:
+ i915_gem_object_unpin(obj);
+out:
+ return ret;
+}
+
+int
+i915_gem_shmem_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ /* LINTED */
+ struct drm_file *file_priv)
+{
+ ssize_t remain, page_length;
+ uint32_t offset;
+ uint64_t first_data_page;
+ int shmem_page_index, shmem_page_offset;
+ int data_page_index, data_page_offset;
+ int ret = 0;
+ uint64_t data_ptr = args->data_ptr;
+ int needs_clflush_after = 0;
+ int needs_clflush_before = 0;
+ int do_bit17_swizzling;
+ uint32_t *user_data = (uint32_t *)(uintptr_t)args->data_ptr;
+
+ remain = args->size;
+
+ /* Pin the user pages containing the data. We can't fault while
+ * holding the struct mutex, and all of the pwrite implementations
+ * want to hold it while dereferencing the user data.
+ */
+ first_data_page = data_ptr / PAGE_SIZE;
+
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ /* If we're not in the cpu write domain, set ourselves into the gtt
+ * write domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will use the data
+ * right away and we therefore have to clflush anyway. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_after = 1;
+ if (obj->gtt_space) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
+ }
+ /* The same trick applies for invalidating partially written cachelines
+ * before writing. */
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
+ && obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_before = 1;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
+ if (needs_clflush_before)
+ i915_gem_clflush_object(obj);
+
+ offset = args->offset;
+ obj->dirty = 1;
+
+ if (do_bit17_swizzling) {
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * shmem_page_index = page number within shmem file
+ * shmem_page_offset = offset within page in shmem file
+ * data_page_index = page number in get_user_pages return
+ * data_page_offset = offset within the data_page_index page.
+ * page_length = bytes to copy for this page
+ */
+ shmem_page_index = offset / DRM_PAGE_SIZE;
+ shmem_page_offset = offset & ~DRM_PAGE_MASK;
+ data_page_index = data_ptr / DRM_PAGE_SIZE - first_data_page;
+ data_page_offset = data_ptr & ~DRM_PAGE_MASK;
+
+ page_length = remain;
+ if ((shmem_page_offset + page_length) > DRM_PAGE_SIZE)
+ page_length = PAGE_SIZE - shmem_page_offset;
+ if ((data_page_offset + page_length) > DRM_PAGE_SIZE)
+ page_length = PAGE_SIZE - data_page_offset;
+
+ slow_shmem_bit17_copy(obj->page_list[shmem_page_index],
+ shmem_page_offset,
+ user_data + data_page_index * DRM_PAGE_SIZE,
+ data_page_offset,
+ page_length,
+ 0);
+
+ remain -= page_length;
+ data_ptr += page_length;
+ offset += page_length;
+ }
+ } else {
+ ret = DRM_COPY_FROM_USER(obj->base.kaddr + args->offset,
+ (caddr_t)user_data,
+ args->size);
+ if (ret)
+ DRM_ERROR("shmem_pwrite_copy failed, ret = %d", ret);
+ }
+
+ i915_gem_object_unpin_pages(obj);
+
+ if (needs_clflush_after)
+ i915_gem_chipset_flush(dev);
+ return ret;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+/* LINTED */
+i915_gem_pwrite_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (args->size == 0)
+ return 0;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ /* Bounds check destination. */
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ TRACE_GEM_OBJ_HISTORY(obj, "pwrite");
+ ret = -EFAULT;
+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
+ * it would end up going through the fenced access, and we'll get
+ * different detiling behavior between reading and writing.
+ * pread/pwrite currently are reading and writing from the CPU
+ * perspective, requiring manual detiling by the client.
+ */
+ if (obj->phys_obj) {
+ ret = i915_gem_phys_pwrite(dev, obj, args, file);
+ goto out;
+ }
+
+ if (obj->cache_level == I915_CACHE_NONE &&
+ obj->tiling_mode == I915_TILING_NONE &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+ /* Note that the gtt paths might fail with non-page-backed user
+ * pointers (e.g. gtt mappings when moving data between
+ * textures). Fall back to the shmem path in that case. */
+
+ /* Flushing cursor object */
+ if (obj->is_cursor)
+ i915_gem_clflush_object(obj);
+ }
+
+ if (ret == -EFAULT || ret == -ENOSPC)
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+i915_gem_check_wedge(struct i915_gpu_error *error,
+ bool interruptible)
+{
+ if (i915_reset_in_progress(error)) {
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but the reset failed ... */
+ if (i915_terminally_wedged(error))
+ return -EIO;
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
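+
+/*
+ * Typical caller pattern for the check above (sketch only): callers
+ * bail out early, e.g.
+ *
+ * ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ * if (ret)
+ * return ret;
+ *
+ * and receive -EIO when the GPU is terminally wedged (or the caller is
+ * non-interruptible), or -EAGAIN while a reset is still in progress.
+ */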
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+ ret = 0;
+ if (seqno == ring->outstanding_lazy_request)
+ ret = i915_add_request(ring, NULL);
+
+ return ret;
+}
+
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number to wait for
+ * @reset_counter: reset sequence associated with the given seqno
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait, in ticks (0 selects a default wait); out - how much time remaining
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
+ * Returns 0 if the seqno was found within the allotted time. Else returns the
+ * errno with the remaining time filled in the timeout argument.
+ */
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ unsigned reset_counter,
+ bool interruptible, clock_t timeout)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ clock_t wait_time = timeout;
+ bool wait_forever = false;
+ int ret = 0, end = 0;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ return 0;
+
+ if (wait_time == 0) {
+ wait_time = 3 * DRM_HZ;
+ }
+
+ if (!ring->irq_get(ring))
+ return -ENODEV;
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
+ i915_reset_in_progress(&dev_priv->gpu_error) || \
+ reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ do {
+ /* busy check is faster than cv wait on gen6+ */
+ if (IS_GEN6(ring->dev)) {
+ if (wait_for(EXIT_COND, jiffies_to_msecs(wait_time)))
+ ret = -EBUSY;
+ } else if (IS_GEN7(ring->dev) && !IS_HASWELL(ring->dev)) {
+ /*
+ * Frequent reads of the CS register may hang some GEN7
+ * platforms, but they are crucial for the missed-IRQ issue.
+ * So the first wait busy-checks the seqno; the second wait
+ * forces correct ordering between the irq and seqno writes,
+ * then checks again.
+ */
+ u32 *regs = ring->status_page.page_addr;
+ if (wait_for(i915_seqno_passed(regs[I915_GEM_HWS_INDEX],
+ seqno) || i915_reset_in_progress(&dev_priv->gpu_error) ||
+ reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter),
+ 2500)) {
+ if (wait_for(i915_seqno_passed(ring->get_seqno(ring, false),
+ seqno) || i915_reset_in_progress(&dev_priv->gpu_error) ||
+ reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter),
+ 500)) {
+ ret = -EBUSY;
+ }
+ }
+ } else {
+ DRM_WAIT(ret, &ring->irq_queue, EXIT_COND);
+ }
+
+ /* We need to check whether any gpu reset happened in between
+ * the caller grabbing the seqno and now ... */
+ if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ ret = -EAGAIN;
+
+ /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
+ * gone. */
+ end = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ if (end)
+ ret = end;
+ } while (end == 0 && wait_forever);
+
+ ring->irq_put(ring);
+#undef EXIT_COND
+ if (ret) {
+ if ((gpu_dump > 0) && !IS_GEN7(ring->dev)) {
+ ring_dump(ring->dev, ring);
+ register_dump(ring->dev);
+ gtt_dump(ring->dev);
+ }
+ DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+ __func__, ret, seqno, ring->get_seqno(ring, true),
+ dev_priv->next_seqno);
+ }
+
+ return (ret);
+}
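+
+/*
+ * Lock-ordering sketch for callers (illustrative only; this is the
+ * pattern used by the nonblocking waits below):
+ *
+ * reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ * mutex_unlock(&dev->struct_mutex);
+ * ret = __wait_seqno(ring, seqno, reset_counter, true, 0);
+ * mutex_lock(&dev->struct_mutex);
+ *
+ * Sampling reset_counter while still holding struct_mutex ensures the
+ * counter and the seqno belong to the same epoch.
+ */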
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool interruptible = dev_priv->mm.interruptible;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ return __wait_seqno(ring, seqno,
+ atomic_read(&dev_priv->gpu_error.reset_counter),
+ interruptible, 0);
+}
+
+static int
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
+{
+ i915_gem_retire_requests_ring(ring);
+
+ /* Manually manage the write flush as we may have not yet
+ * retired the buffer.
+ *
+ * Note that the last_write_seqno is always the earlier of
+ * the two (read/write) seqno, so if we have successfully waited,
+ * we know we have passed the last write.
+ */
+ obj->last_write_seqno = 0;
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+
+ return 0;
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct intel_ring_buffer *ring = obj->ring;
+ u32 seqno;
+ int ret;
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ return i915_gem_object_wait_rendering__tail(obj, ring);
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = obj->ring;
+ unsigned reset_counter;
+ u32 seqno;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!dev_priv->mm.interruptible);
+
+ seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+ if (seqno == 0)
+ return 0;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ mutex_unlock(&dev->struct_mutex);
+ ret = __wait_seqno(ring, seqno, reset_counter, true, 0);
+ mutex_lock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ return i915_gem_object_wait_rendering__tail(obj, ring);
+}
+
+/**
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
+ */
+int
+/* LINTED */
+i915_gem_set_domain_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_set_domain *args = data;
+ struct drm_i915_gem_object *obj;
+ uint32_t read_domains = args->read_domains;
+ uint32_t write_domain = args->write_domain;
+ int ret;
+
+ /* Only handle setting domains to types used by the CPU. */
+ if (write_domain & I915_GEM_GPU_DOMAINS)
+ return -EINVAL;
+
+ if (read_domains & I915_GEM_GPU_DOMAINS)
+ return -EINVAL;
+
+ /* Having something in the write domain implies it's in the read
+ * domain, and only that read domain. Enforce that in the request.
+ */
+ if (write_domain != 0 && read_domains != write_domain)
+ return -EINVAL;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ /* Try to flush the object off the GPU without holding the lock.
+ * We will repeat the flush holding the lock in the normal manner
+ * to catch cases where we are gazumped.
+ */
+ ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+ if (ret)
+ goto unref;
+
+ if (read_domains & I915_GEM_DOMAIN_GTT) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+ /* Silently promote "you're not bound, there was nothing to do"
+ * to success, since the client was just asking us to
+ * make sure everything was done.
+ */
+ if (ret == -EINVAL)
+ ret = 0;
+ } else {
+ ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ }
+
+unref:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+/* LINTED */
+i915_gem_sw_finish_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_sw_finish *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ /* Pinned buffers may be scanout, so flush the cache */
+ if (obj->pin_count)
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+/* LINTED */
+i915_gem_mmap_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ caddr_t vvaddr = NULL;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+
+ /* prime objects have no backing filp to GEM mmap
+ * pages from.
+ */
+
+ if (obj->size > dev_priv->gtt.mappable_end) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -E2BIG;
+ }
+
+ ret = ddi_devmap_segmap(dev_id, (off_t)obj->maplist.user_token,
+ ttoproc(curthread)->p_as, &vvaddr, obj->maplist.map->size,
+ PROT_ALL, PROT_ALL, MAP_SHARED, credp);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ args->addr_ptr = (uint64_t)(uintptr_t)vvaddr;
+
+ return 0;
+}
+
+void
+i915_gem_fault(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ uint64_t start;
+ int ret = 0;
+
+ if (obj->maplist.map->gtt_mmap)
+ return;
+
+ /* Now bind it into the GTT if needed */
+ mutex_lock(&dev->struct_mutex);
+
+ TRACE_GEM_OBJ_HISTORY(obj_priv, "gfault");
+
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj_priv->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = i915_gem_object_pin(obj_priv, 0, true, false);
+ if (ret)
+ goto unlock;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj_priv, 1);
+ if (ret)
+ goto unpin;
+
+ ret = i915_gem_object_get_fence(obj_priv);
+ if (ret)
+ goto unpin;
+
+ obj_priv->fault_mappable = true;
+
+ start = (dev->agp_aperbase + obj_priv->gtt_offset);
+
+ /* Finally, remap it using the new GTT offset */
+ drm_gem_mmap(obj, start);
+
+ obj->maplist.map->gtt_mmap = 1;
+
+unpin:
+ i915_gem_object_unpin(obj_priv);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * i915_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+static int
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct ddi_umem_cookie *umem_cookie = obj->base.maplist.map->umem_cookie;
+ int ret;
+
+ if (obj->base.gtt_map_kaddr == NULL) {
+ ret = drm_gem_create_mmap_offset(&obj->base);
+ if (ret) {
+ DRM_ERROR("failed to alloc kernel memory");
+ return ret;
+ }
+ }
+
+ umem_cookie->cvaddr = obj->base.gtt_map_kaddr;
+
+ /* user_token is the fake offset
+ * created by drm_map_handle at allocation time
+ */
+ obj->mmap_offset = obj->base.maplist.user_token;
+ obj->base.maplist.map->callback = 1;
+
+ return 0;
+}
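+
+/*
+ * Userspace consumes the fake offset roughly as follows (hypothetical
+ * snippet, not part of this driver):
+ *
+ * struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
+ * ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
+ * ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ * fd, arg.offset);
+ *
+ * The offset has no meaning except as a lookup key for the object.
+ */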
+
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct gem_map_list *entry, *temp;
+
+ if (obj->base.maplist.map->gtt_mmap) {
+ mutex_lock(&dev->page_fault_lock);
+ if (!list_empty(&obj->base.seg_list)) {
+ list_for_each_entry_safe(entry, temp, struct gem_map_list, &obj->base.seg_list, head) {
+ devmap_unload(entry->dhp, entry->mapoffset, entry->maplen);
+ list_del(&entry->head);
+ drm_free(entry, sizeof (struct gem_map_list), DRM_MEM_MAPS);
+ }
+ }
+ mutex_unlock(&dev->page_fault_lock);
+ drm_gem_release_mmap(&obj->base);
+ obj->base.maplist.map->gtt_mmap = 0;
+ }
+}
+
+static void
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ drm_gem_free_mmap_offset(&obj->base);
+ obj->mmap_offset = 0;
+}
+
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+{
+ uint32_t gtt_size;
+
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ tiling_mode == I915_TILING_NONE)
+ return size;
+
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (INTEL_INFO(dev)->gen == 3)
+ gtt_size = 1024*1024;
+ else
+ gtt_size = 512*1024;
+
+ while (gtt_size < size)
+ gtt_size <<= 1;
+
+ return gtt_size;
+}
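+
+/*
+ * Example (hypothetical size): a 300K tiled object needs a 512K fence
+ * region on gen2 or a 1M region on gen3, since the region is rounded
+ * up to the next power of two starting from the per-generation minimum.
+ */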
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping if needed.
+ */
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+ int tiling_mode, bool fenced)
+{
+ /*
+ * Minimum alignment is 4k (GTT page size), but might be greater
+ * if a fence register is needed for the object.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+ tiling_mode == I915_TILING_NONE)
+ return 4096;
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
+}
+
+int
+i915_gem_mmap_gtt(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (obj->base.size > dev_priv->gtt.mappable_end) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ if (!obj->mmap_offset) {
+ ret = i915_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+ }
+
+ *offset = obj->mmap_offset;
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file_priv: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+/* LINTED */
+i915_gem_mmap_gtt_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_mmap_gtt *args = data;
+
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
+
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+ int ret;
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
+ WARN_ON(ret != -EIO);
+ i915_gem_clflush_object(obj);
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_save_bit_17_swizzle(obj);
+
+ obj->dirty = 0;
+
+ kmem_free(obj->page_list,
+ btop(obj->base.size) * sizeof(caddr_t));
+ obj->page_list = NULL;
+}
+
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+ if (obj->page_list == NULL)
+ return 0;
+
+ BUG_ON(obj->gtt_space);
+
+ if (obj->pages_pin_count)
+ return -EBUSY;
+
+ ops->put_pages(obj);
+ obj->page_list = NULL;
+
+ list_del(&obj->global_list);
+ return 0;
+}
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+ pgcnt_t np = btop(obj->base.size);
+ caddr_t va;
+ long i;
+
+ obj->page_list = kmem_zalloc(np * sizeof(caddr_t), KM_SLEEP);
+ if (obj->page_list == NULL) {
+ DRM_ERROR("Faled to allocate page list. size = %ld", np * sizeof(caddr_t));
+ return -ENOMEM;
+ }
+
+ for (i = 0, va = obj->base.kaddr; i < np; i++, va += PAGESIZE) {
+ obj->page_list[i] = va;
+ }
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj);
+ return 0;
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+ int ret;
+
+ if (obj->page_list)
+ return 0;
+
+ BUG_ON(obj->pages_pin_count);
+
+ ret = ops->get_pages(obj);
+ if (ret)
+ return ret;
+
+ list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list, (caddr_t)obj);
+ return 0;
+}
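+
+/*
+ * Typical pairing (sketch of the pattern used by callers above):
+ *
+ * ret = i915_gem_object_get_pages(obj);
+ * if (ret)
+ * return ret;
+ * i915_gem_object_pin_pages(obj);
+ * ...
+ * i915_gem_object_unpin_pages(obj);
+ *
+ * i915_gem_object_put_pages() returns -EBUSY while pages_pin_count
+ * is nonzero.
+ */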
+
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = intel_ring_get_seqno(ring);
+
+ BUG_ON(ring == NULL);
+ if (obj->ring != ring && obj->last_write_seqno) {
+ /* Keep the seqno relative to the current ring */
+ obj->last_write_seqno = seqno;
+ }
+ obj->ring = ring;
+
+ /* Add a reference if we're newly entering the active list. */
+ if (!obj->active) {
+ drm_gem_object_reference(&obj->base);
+ obj->active = 1;
+ }
+
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list, (caddr_t)obj);
+ list_move_tail(&obj->ring_list, &ring->active_list, (caddr_t)obj);
+ obj->last_read_seqno = seqno;
+
+ if (obj->fenced_gpu_access) {
+ obj->last_fenced_seqno = seqno;
+
+ /* Bump MRU to take account of the delayed flush */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list, (caddr_t)reg);
+ }
+ }
+ TRACE_GEM_OBJ_HISTORY(obj, "to active");
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
+ BUG_ON(!obj->active);
+
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
+
+ list_del_init(&obj->ring_list);
+ obj->ring = NULL;
+
+ obj->last_read_seqno = 0;
+ obj->last_write_seqno = 0;
+ obj->base.write_domain = 0;
+
+ obj->last_fenced_seqno = 0;
+ obj->fenced_gpu_access = false;
+
+ obj->active = 0;
+ TRACE_GEM_OBJ_HISTORY(obj, "to inactive");
+ drm_gem_object_unreference(&obj->base);
+
+ WARN_ON(i915_verify_lists(dev));
+}
+
+static int
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i, j;
+
+ /* Carefully retire all requests without writing to the rings */
+ for_each_ring(ring, dev_priv, i) {
+ ret = intel_ring_idle(ring);
+ if (ret)
+ return ret;
+ }
+ i915_gem_retire_requests(dev);
+
+ /* Finally reset hw state */
+ for_each_ring(ring, dev_priv, i) {
+ intel_ring_init_seqno(ring, seqno);
+
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ring->sync_seqno[j] = 0;
+ }
+
+ return 0;
+}
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (seqno == 0)
+ return -EINVAL;
+
+ /* The seqno in the HWS page must be set to one less than
+ * what we will inject into the ring
+ */
+ ret = i915_gem_init_seqno(dev, seqno - 1);
+ if (ret)
+ return ret;
+
+ /* Carefully set the last_seqno value so that wrap
+ * detection still works
+ */
+ dev_priv->next_seqno = seqno;
+ dev_priv->last_seqno = seqno - 1;
+ if (dev_priv->last_seqno == 0)
+ dev_priv->last_seqno--;
+
+ return 0;
+}
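+
+/*
+ * Example: i915_gem_set_seqno(dev, 1) leaves next_seqno = 1 and
+ * last_seqno = 0xffffffff (the decrement above skips the reserved
+ * value 0 by wrapping), so wrap detection in i915_seqno_passed()
+ * keeps working.
+ */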
+
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_init_seqno(dev, 0);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = 1;
+ }
+
+ *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+ return 0;
+}
+
+int __i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_object *obj,
+ u32 *out_seqno)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_request *request;
+ u32 request_ring_position, request_start;
+ int was_empty;
+ int ret;
+
+ request_start = intel_ring_get_tail(ring);
+ /*
+ * Emit any outstanding flushes - execbuf can fail to emit the flush
+ * after having emitted the batchbuffer command. Hence we need to fix
+ * things up similar to emitting the lazy request. The difference here
+ * is that the flush _must_ happen before the next request, no matter
+ * what.
+ */
+ ret = intel_ring_flush_all_caches(ring);
+ if (ret)
+ return ret;
+
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ /* Record the position of the start of the request so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the head.
+ */
+ request_ring_position = intel_ring_get_tail(ring);
+
+ ret = ring->add_request(ring);
+ if (ret) {
+ kfree(request, sizeof(*request));
+ return ret;
+ }
+
+ request->seqno = intel_ring_get_seqno(ring);
+ request->ring = ring;
+ request->head = request_start;
+ request->tail = request_ring_position;
+ request->ctx = ring->last_context;
+ request->batch_obj = obj;
+
+ /* Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when this
+ * request is retired will the batch_obj be moved onto the
+ * inactive_list and lose its active reference. Hence we do not need
+ * to explicitly hold another reference here.
+ */
+
+ if (request->ctx)
+ i915_gem_context_reference(request->ctx);
+
+ request->emitted_jiffies = jiffies;
+ was_empty = list_empty(&ring->request_list);
+ list_add_tail(&request->list, &ring->request_list, (caddr_t)request);
+ request->file_priv = NULL;
+
+ if (file) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ if (file_priv->status == 1) {
+ spin_lock(&file_priv->mm.lock);
+ request->file_priv = file_priv;
+ list_add_tail(&request->client_list,
+ &file_priv->mm.request_list, (caddr_t)request);
+ spin_unlock(&file_priv->mm.lock);
+ }
+ }
+
+ ring->outstanding_lazy_request = 0;
+
+ if (!dev_priv->mm.suspended && !dev_priv->gpu_hang) {
+ if (i915_enable_hangcheck) {
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
+ if (was_empty) {
+ /* delay for HZ ticks, then run the work directly (rather than queueing it on a Linux workqueue) */
+ test_set_timer(&dev_priv->mm.retire_timer, DRM_HZ);
+ DRM_DEBUG("i915_gem: schedule_delayed_work");
+ intel_mark_busy(dev_priv->dev);
+ }
+ }
+
+ if (out_seqno)
+ *out_seqno = request->seqno;
+
+ return 0;
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_file_private *file_priv = request->file_priv;
+
+ if (!file_priv)
+ return;
+
+ spin_lock(&file_priv->mm.lock);
+ if (request->file_priv) {
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ spin_unlock(&file_priv->mm.lock);
+}
+
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+{
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return true;
+
+ return false;
+}
+
+static bool i915_head_inside_request(const u32 acthd_unmasked,
+ const u32 request_start,
+ const u32 request_end)
+{
+ const u32 acthd = acthd_unmasked & HEAD_ADDR;
+
+ if (request_start < request_end) {
+ if (acthd >= request_start && acthd < request_end)
+ return true;
+ } else if (request_start > request_end) {
+ if (acthd >= request_start || acthd < request_end)
+ return true;
+ }
+
+ return false;
+}
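+
+/*
+ * Example of the wrapped case (hypothetical ring offsets): with
+ * request_start = 0xf000 and request_end = 0x0100 the request wraps
+ * past the end of the ring, so both acthd = 0xf800 and acthd = 0x0040
+ * fall inside the request.
+ */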
+
+static bool i915_request_guilty(struct drm_i915_gem_request *request,
+ const u32 acthd, bool *inside)
+{
+ /* There is a possibility that the unmasked head address,
+ * while pointing inside the ring, matches the batch_obj address
+ * range. However, this is extremely unlikely.
+ */
+
+ if (request->batch_obj) {
+ if (i915_head_inside_object(acthd, request->batch_obj)) {
+ *inside = true;
+ return true;
+ }
+ }
+
+ if (i915_head_inside_request(acthd, request->head, request->tail)) {
+ *inside = false;
+ return true;
+ }
+
+ return false;
+}
+
+static void i915_set_reset_status(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_request *request,
+ u32 acthd)
+{
+ struct i915_ctx_hang_stats *hs = NULL;
+ bool inside, guilty;
+
+ /* Innocent until proven guilty */
+ guilty = false;
+
+ if (ring->hangcheck.action != wait &&
+ i915_request_guilty(request, acthd, &inside)) {
+ DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+ ring->name,
+ inside ? "inside" : "flushing",
+ request->batch_obj ?
+ request->batch_obj->gtt_offset : 0,
+ request->ctx ? request->ctx->id : 0,
+ acthd);
+
+ guilty = true;
+ }
+
+ /* If contexts are disabled or this is the default context, use
+ * file_priv->reset_state
+ */
+ if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
+ hs = &request->ctx->hang_stats;
+ else if (request->file_priv)
+ hs = &request->file_priv->hang_stats;
+
+ if (hs) {
+ if (guilty)
+ hs->batch_active++;
+ else
+ hs->batch_pending++;
+ }
+}
+
+static void i915_gem_free_request(struct drm_i915_gem_request *request)
+{
+ list_del(&request->list);
+ i915_gem_request_remove_from_client(request);
+
+ if (request->ctx)
+ i915_gem_context_unreference(request->ctx);
+
+ kfree(request, sizeof(*request));
+}
+
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ u32 completed_seqno;
+ u32 acthd;
+
+ acthd = intel_ring_get_active_head(ring);
+ completed_seqno = ring->get_seqno(ring, false);
+
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ if (request->seqno > completed_seqno)
+ i915_set_reset_status(ring, request, acthd);
+
+ i915_gem_free_request(request);
+ }
+
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+}
+
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence, otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
+ }
+}
+
+void i915_gem_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_lists(dev_priv, ring);
+
+ /* Move everything out of the GPU domains to ensure we do any
+ * necessary invalidation upon reuse.
+ */
+ list_for_each_entry(obj, struct drm_i915_gem_object,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ {
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ i915_gem_restore_fences(dev);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+{
+ uint32_t seqno;
+
+ if (list_empty(&ring->request_list))
+ return;
+
+ WARN_ON(i915_verify_lists(ring->dev));
+
+ seqno = ring->get_seqno(ring, true);
+
+ while (!list_empty(&ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ if (!i915_seqno_passed(seqno, request->seqno))
+ break;
+
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of tail of the request to update the last known position
+ * of the GPU head.
+ */
+ ring->last_retired_head = request->tail;
+
+ i915_gem_free_request(request);
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&ring->active_list)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
+
+ if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+ break;
+
+ i915_gem_object_move_to_inactive(obj);
+ }
+
+ if (ring->trace_irq_seqno &&
+ i915_seqno_passed(seqno, ring->trace_irq_seqno)) {
+ ring->irq_put(ring);
+ ring->trace_irq_seqno = 0;
+ }
+
+ WARN_ON(i915_verify_lists(ring->dev));
+}
+
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_retire_requests_ring(ring);
+}
+
+static void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ mm.retire_work);
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_ring_buffer *ring;
+ bool idle;
+ int i;
+
+ /* Come back later if the device is busy... */
+ if (!mutex_tryenter(&dev->struct_mutex)) {
+ test_set_timer(&dev_priv->mm.retire_timer, DRM_HZ);
+ return;
+ }
+
+ i915_gem_retire_requests(dev);
+
+ /* Send a periodic flush down the ring so we don't hold onto GEM
+ * objects indefinitely.
+ */
+ idle = true;
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->gpu_caches_dirty)
+ i915_add_request(ring, NULL);
+
+ idle &= list_empty(&ring->request_list);
+ }
+
+ if (!dev_priv->mm.suspended && !idle && !dev_priv->gpu_hang)
+ {
+ DRM_DEBUG("i915_gem: schedule_delayed_work");
+ test_set_timer(&dev_priv->mm.retire_timer, DRM_HZ);
+ }
+ if (idle)
+ intel_mark_idle(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+void
+i915_gem_retire_work_timer(void *device)
+{
+ struct drm_device *dev = (struct drm_device *)device;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ queue_work(dev_priv->wq, &dev_priv->mm.retire_work);
+}
+
+/**
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring any
+ * completed requests.
+ */
+static int
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->active) {
+ ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests_ring(obj->ring);
+ }
+
+ return 0;
+}
+
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ * -ETIME: object is still busy after timeout
+ * -ERESTARTSYS: signal interrupted the wait
+ * -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ * -EAGAIN: GPU wedged
+ * -ENOMEM: allocation failure
+ * -ENODEV: Internal IRQ fail
+ * -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the
+ * busy ioctl.
+ */
+int
+i915_gem_wait_ioctl(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_wait *args = data;
+ struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring = NULL;
+ clock_t timeout = 0;
+ unsigned reset_counter;
+ u32 seqno = 0;
+ int ret = 0;
+
+ if (args->timeout_ns >= 0) {
+ timeout = drv_usectohz(args->timeout_ns / 1000);
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+ if (&obj->base == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOENT;
+ }
+
+ /* Need to make sure the object gets inactive eventually. */
+ ret = i915_gem_object_flush_active(obj);
+ if (ret)
+ goto out;
+
+ if (obj->active) {
+ seqno = obj->last_read_seqno;
+ ring = obj->ring;
+ }
+
+ if (seqno == 0)
+ goto out;
+
+ /* Do this after OLR check to make sure we make forward progress polling
+ * on this IOCTL with a 0 timeout (like busy ioctl)
+ */
+ if (!args->timeout_ns) {
+ ret = -ETIME;
+ goto out;
+ }
+
+ drm_gem_object_unreference(&obj->base);
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+ if (timeout) {
+ args->timeout_ns = drv_hztousec(timeout) * 1000;
+ }
+ return ret;
+
+out:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj, false);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_read_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ ret = i915_gem_check_olr(obj->ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = to->sync_to(to, from, seqno);
+ if (!ret)
+ /* We use last_read_seqno because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->sync_seqno[idx] = obj->last_read_seqno;
+
+ return ret;
+}
+
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ return;
+
+ membar_producer();
+ obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj, uint32_t type)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ if (obj->gtt_space == NULL)
+ return 0;
+
+ if (obj->pin_count)
+ return -EBUSY;
+
+ BUG_ON(obj->page_list == NULL);
+
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret)
+ return ret;
+ /* Continue on if we fail due to EIO, the GPU is hung so we
+ * should be safe and we need to cleanup or else we might
+ * cause memory corruption through use-after-free.
+ */
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* release the fence reg _after_ flushing */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_unbind_object(obj, type);
+ if (obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
+ obj->has_aliasing_ppgtt_mapping = 0;
+ }
+ i915_gem_gtt_finish_object(obj);
+ i915_gem_object_unpin_pages(obj);
+
+ list_del(&obj->mm_list);
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list, (caddr_t)obj);
+ /* Avoid an unnecessary call to unbind on rebind. */
+ obj->map_and_fenceable = true;
+
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+ obj->gtt_offset = 0;
+ TRACE_GEM_OBJ_HISTORY(obj, "unbind");
+ return 0;
+}
+
+int i915_gpu_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i;
+
+ /* Flush everything onto the inactive list. */
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_idle(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int fence_reg;
+ int fence_pitch_shift;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ fence_reg = FENCE_REG_SANDYBRIDGE_0;
+ fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+ } else {
+ fence_reg = FENCE_REG_965_0;
+ fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+ }
+
+ fence_reg += reg * 8;
+
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg, 0);
+ POSTING_READ(fence_reg);
+
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ uint64_t val;
+
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE(fence_reg + 4, val >> 32);
+ POSTING_READ(fence_reg + 4);
+
+ I915_WRITE(fence_reg + 0, val);
+ POSTING_READ(fence_reg);
+ } else {
+ I915_WRITE(fence_reg + 4, 0);
+ POSTING_READ(fence_reg + 4);
+ }
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 val;
+
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ int pitch_val;
+ int tile_width;
+
+ if((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)))
+ DRM_ERROR("object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size);
+
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
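+
+/*
+ * Example pitch encoding (hypothetical object): an X-tiled surface
+ * with a 2048-byte stride and 512-byte-wide tiles spans 4 tile
+ * widths, so pitch_val = ffs(4) - 1 = 2 is written to the register.
+ */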
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ uint32_t pitch_val;
+
+ if((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)))
+ DRM_ERROR("object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
+
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+ return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Ensure that all CPU reads are completed before installing a fence
+ * and all writes before removing the fence.
+ */
+ if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+ membar_producer();
+
+ if(obj && (!obj->stride || !obj->tiling_mode))
+ DRM_ERROR("bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ case 5:
+ case 4: i965_write_fence_reg(dev, reg, obj); break;
+ case 3: i915_write_fence_reg(dev, reg, obj); break;
+ case 2: i830_write_fence_reg(dev, reg, obj); break;
+ default: BUG();
+ }
+
+ /* And similarly be paranoid that no direct access to this region
+ * is reordered to before the fence is installed.
+ */
+ if (i915_gem_object_needs_mb(obj))
+ membar_producer();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list, (caddr_t)fence);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
+ obj->fence_dirty = false;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->last_fenced_seqno) {
+ int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+ if (ret)
+ return ret;
+
+ obj->last_fenced_seqno = 0;
+ }
+
+ obj->fenced_gpu_access = false;
+ return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_fence_reg *fence;
+ int ret;
+
+ ret = i915_gem_object_wait_fence(obj);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
+
+ fence = &dev_priv->fence_regs[obj->fence_reg];
+
+ i915_gem_object_fence_lost(obj);
+ i915_gem_object_update_fence(obj, fence, false);
+
+ return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_fence_reg *reg, *avail;
+ int i;
+
+ /* First try to find a free reg */
+ avail = NULL;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return reg;
+
+ if (!reg->pin_count)
+ avail = reg;
+ }
+
+ if (avail == NULL)
+ return NULL;
+
+ /* None available, try to steal one or wait for a user to finish */
+ list_for_each_entry(reg, struct drm_i915_fence_reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->pin_count)
+ continue;
+
+ return reg;
+ }
+
+ return NULL;
+}
+
+/**
+ * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
+ struct drm_i915_fence_reg *reg;
+ int ret;
+
+ /* Have we updated the tiling parameters upon the object and so
+ * will need to serialise the write to the associated fence register?
+ */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_wait_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list, (caddr_t)reg);
+ return 0;
+ }
+ } else if (enable) {
+ reg = i915_find_fence_reg(dev);
+ if (reg == NULL)
+ return -EDEADLK;
+
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ ret = i915_gem_object_wait_fence(old);
+ if (ret)
+ return ret;
+
+ i915_gem_object_fence_lost(old);
+ }
+ } else
+ return 0;
+
+ i915_gem_object_update_fence(obj, reg, enable);
+
+ return 0;
+}
+
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+ struct drm_mm_node *gtt_space,
+ unsigned long cache_level)
+{
+ struct drm_mm_node *other;
+
+ /* On non-LLC machines we have to be careful when putting differing
+ * types of snoopable memory together to avoid the prefetcher
+ * crossing memory domains and dying.
+ */
+ if (HAS_LLC(dev))
+ return true;
+
+ if (gtt_space == NULL)
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
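+
+/*
+ * Example: on a non-LLC machine, placing an I915_CACHE_LLC object
+ * immediately after an I915_CACHE_NONE neighbour with no hole between
+ * them fails this check, forcing eviction or a different placement.
+ */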
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.gtt_list, global_list) {
+ if (obj->gtt_space == NULL) {
+ DRM_ERROR("object found on GTT list with no space reserved\n");
+ err++;
+ continue;
+ }
+
+ if (obj->cache_level != obj->gtt_space->color) {
+ DRM_ERROR("object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level,
+ obj->gtt_space->color);
+ err++;
+ continue;
+ }
+
+ if (!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level)) {
+ DRM_ERROR("invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level);
+ err++;
+ continue;
+ }
+ }
+
+ WARN_ON(err);
+#endif
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mm_node *node;
+ u32 size, fence_size, fence_alignment, unfenced_alignment;
+ bool mappable, fenceable;
+ size_t gtt_max = map_and_fenceable ?
+ dev_priv->gtt.mappable_end : dev_priv->gtt.total;
+ int ret;
+
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode, true);
+ unfenced_alignment =
+ i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode, false);
+
+ if (alignment == 0)
+ alignment = map_and_fenceable ? fence_alignment :
+ unfenced_alignment;
+ if (map_and_fenceable && alignment & (fence_alignment - 1)) {
+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+ return -EINVAL;
+ }
+
+ size = map_and_fenceable ? fence_size : obj->base.size;
+
+ /* If the object is bigger than the entire aperture, reject it early
+ * before evicting everything in a vain attempt to find space.
+ */
+ if (obj->base.size > gtt_max) {
+ DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+ obj->base.size,
+ map_and_fenceable ? "mappable" : "total",
+ gtt_max);
+ return -E2BIG;
+ }
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return -ENOMEM;
+ }
+
+search_free:
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment,
+ obj->cache_level, 0, gtt_max);
+ if (ret) {
+ ret = i915_gem_evict_something(dev, size, alignment,
+ obj->cache_level,
+ map_and_fenceable,
+ nonblocking);
+ if (ret == 0)
+ goto search_free;
+
+ i915_gem_object_unpin_pages(obj);
+ kfree(node, sizeof(*node));
+ return ret;
+ }
+ if (!i915_gem_valid_gtt_space(dev, node, obj->cache_level)) {
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
+ return -EINVAL;
+ }
+
+ ret = i915_gem_gtt_prepare_object(obj);
+ if (ret) {
+ i915_gem_object_unpin_pages(obj);
+ drm_mm_put_block(node);
+ return ret;
+ }
+
+ list_move_tail(&obj->global_list, &dev_priv->mm.bound_list, (caddr_t)obj);
+ list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
+
+ obj->gtt_space = node;
+ obj->gtt_offset = node->start;
+
+ fenceable =
+ node->size == fence_size &&
+ (node->start & (fence_alignment - 1)) == 0;
+
+ mappable =
+ obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+
+ obj->map_and_fenceable = mappable && fenceable;
+
+ TRACE_GEM_OBJ_HISTORY(obj, "bind gtt");
+ i915_gem_verify_gtt(dev);
+ return 0;
+}
+
+void
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+{
+ /* If we don't have a page list set up, then we're not pinned
+ * to GPU, and we can ignore the cache flush because it'll happen
+ * again at bind time.
+ */
+ if (obj->page_list == NULL)
+ return;
+
+ /*
+ * Stolen memory is always coherent with the GPU as it is explicitly
+ * marked as wc by the system, or the system is cache-coherent.
+ */
+ if (obj->stolen)
+ return;
+
+ /* If the GPU is snooping the contents of the CPU cache,
+ * we do not need to manually clear the CPU cache lines. However,
+ * the caches are only snooped when the render cache is
+ * flushed/invalidated. As we always have to emit invalidations
+ * and flushes when moving into and out of the RENDER domain, correct
+ * snooping behaviour occurs naturally as the result of our domain
+ * tracking.
+ */
+ if (obj->cache_level != I915_CACHE_NONE)
+ return;
+
+ drm_clflush_pages(obj->page_list, obj->base.size / PAGE_SIZE);
+ TRACE_GEM_OBJ_HISTORY(obj, "clflush");
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
+{
+ if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
+ return;
+
+ /* No actual flushing is required for the GTT write domain. Writes
+ * to it immediately go to main memory as far as we know, so there's
+ * no chipset flush. It also doesn't land in render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
+ */
+ membar_producer();
+
+ obj->base.write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+ return;
+
+ i915_gem_clflush_object(obj);
+ i915_gem_chipset_flush(dev);
+ obj->base.write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
+{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj->gtt_space == NULL)
+ return -EINVAL;
+
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
+ ret = i915_gem_object_wait_rendering(obj, !write);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * GTT domain upon first access.
+ */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ membar_producer();
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ /* GPU reset can handle this error:
+ * BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ */
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->dirty = 1;
+ }
+
+ /* And bump the LRU for this access */
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
+
+ return 0;
+}
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ if (obj->pin_count) {
+		DRM_DEBUG("cannot change the cache level of pinned objects\n");
+ return -EBUSY;
+ }
+
+ if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+ ret = i915_gem_object_unbind(obj, true);
+ if (ret)
+ return ret;
+ }
+
+ if (obj->gtt_space) {
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Before SandyBridge, you could not use tiling or fence
+ * registers with snooped memory, so relinquish any fences
+ * currently pointing to our region in the aperture.
+ */
+ if (INTEL_INFO(dev)->gen < 6) {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, cache_level);
+ if (obj->has_aliasing_ppgtt_mapping)
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, cache_level);
+
+ obj->gtt_space->color = cache_level;
+ }
+
+ if (cache_level == I915_CACHE_NONE) {
+ /* If we're coming from LLC cached, then we haven't
+ * actually been tracking whether the data is in the
+ * CPU cache or not, since we only allow one bit set
+ * in obj->write_domain and have been skipping the clflushes.
+ * Just set it to the CPU cache for now.
+ */
+ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ obj->cache_level = cache_level;
+ i915_gem_verify_gtt(dev);
+ return 0;
+}
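+
+/*
+ * Illustrative sketch (not part of the imported code): callers pick the
+ * cache level that matches an object's intended use.  Scanout must be
+ * uncached (see i915_gem_object_pin_to_display_plane() below), while gen7+
+ * context state can use I915_CACHE_LLC_MLC (see create_hw_context() in
+ * i915_gem_context.c):
+ *
+ *	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ *	if (ret)
+ *		return ret;
+ */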
+
+int i915_gem_get_caching_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ args->caching = obj->cache_level != I915_CACHE_NONE;
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int i915_gem_set_caching_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ enum i915_cache_level level;
+ int ret;
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ level = I915_CACHE_LLC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, level);
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/*
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ */
+int
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ struct intel_ring_buffer *pipelined)
+{
+ /* LINTED */
+ u32 old_read_domains, old_write_domain;
+ int ret;
+
+ if (pipelined != obj->ring) {
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
+ return ret;
+ }
+
+ /* The display engine is not coherent with the LLC cache on gen6. As
+ * a result, we make sure that the pinning that is about to occur is
+	 * done with uncached PTEs. This is the lowest common denominator for all
+ * chipsets.
+ *
+ * However for gen6+, we could do better by using the GFDT bit instead
+ * of uncaching, which would allow us to flush all the LLC-cached data
+ * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+ */
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret)
+ return ret;
+
+ /* As the user may map the buffer once pinned in the display plane
+ * (e.g. libkms for the bootup splash), we have to ensure that we
+ * always use map_and_fenceable for all scanout buffers.
+ */
+ ret = i915_gem_object_pin(obj, alignment, true, false);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ obj->base.write_domain = 0;
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+
+ return 0;
+}
+
+int
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
+ return 0;
+
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
+
+ /* Ensure that we invalidate the GPU's caches and TLBs. */
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ /* LINTED */
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
+
+ ret = i915_gem_object_wait_rendering(obj, !write);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
+
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj);
+
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ struct drm_i915_gem_request *request;
+ struct intel_ring_buffer *ring = NULL;
+ unsigned reset_counter;
+ u32 seqno = 0;
+ int ret;
+
+ ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+ if (ret)
+ return ret;
+ spin_lock(&file_priv->mm.lock);
+ list_for_each_entry(request, struct drm_i915_gem_request, &file_priv->mm.request_list, client_list) {
+ if (time_after_eq(request->emitted_jiffies, recent_enough))
+ break;
+
+ ring = request->ring;
+ seqno = request->seqno;
+ }
+ reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+ spin_unlock(&file_priv->mm.lock);
+
+ if (seqno == 0)
+ return 0;
+
+ ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+ if (ret == 0)
+ test_set_timer(&dev_priv->mm.retire_timer, 0);
+
+ return ret;
+}
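+
+/*
+ * Worked example (illustrative): suppose a client emitted requests 30ms,
+ * 15ms and 5ms ago.  With recent_enough = jiffies - 20ms, the loop above
+ * stops at the 15ms-old request (the first one newer than the cutoff), so
+ * the throttle waits on the 30ms-old request: the newest one older than
+ * the window.  The client thus keeps roughly one 20ms window of rendering
+ * queued ahead of the GPU.
+ */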
+
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
+{
+ int ret;
+
+	if (obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)
+ return -EBUSY;
+
+ if (obj->gtt_space != NULL) {
+ if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ (map_and_fenceable && !obj->map_and_fenceable)) {
+ DRM_INFO("bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " obj->map_and_fenceable=%d\n",
+ obj->gtt_offset, alignment,
+ map_and_fenceable,
+ obj->map_and_fenceable);
+ ret = i915_gem_object_unbind(obj, 1);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (obj->gtt_space == NULL) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ map_and_fenceable,
+ nonblocking);
+ if (ret)
+ return ret;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+ }
+
+ if (!obj->has_global_gtt_mapping && map_and_fenceable)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ obj->pin_count++;
+ obj->pin_mappable |= map_and_fenceable;
+
+ return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pin_count == 0);
+ BUG_ON(obj->gtt_space == NULL);
+
+ if (--obj->pin_count == 0)
+ obj->pin_mappable = false;
+}
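+
+/*
+ * Illustrative sketch (not part of the imported code): pin and unpin must
+ * be balanced, and the object stays bound in the GTT while pinned.  A
+ * caller already holding struct_mutex might do:
+ *
+ *	ret = i915_gem_object_pin(obj, 4096, true, false);
+ *	if (ret == 0) {
+ *		... access obj through its GTT mapping ...
+ *		i915_gem_object_unpin(obj);
+ *	}
+ *
+ * The alignment (4096) and the map_and_fenceable/nonblocking arguments are
+ * example values; real callers derive them from tiling and intended use.
+ */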
+
+int
+/* LINTED */
+i915_gem_pin_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_pin *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (obj->pin_filp != NULL && obj->pin_filp != file) {
+ DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+ if (obj->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment, true, false);
+ if (ret)
+ goto out;
+ }
+
+ /* XXX - flush the CPU caches for pinned objects
+ * as the X server doesn't manage domains yet
+ */
+ i915_gem_object_flush_cpu_write_domain(obj);
+ args->offset = obj->gtt_offset;
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+/* LINTED */
+i915_gem_unpin_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_pin *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ if (obj->pin_filp != file) {
+		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
+ args->handle);
+ ret = -EINVAL;
+ goto out;
+ }
+ obj->user_pin_count--;
+ if (obj->user_pin_count == 0) {
+ obj->pin_filp = NULL;
+ i915_gem_object_unpin(obj);
+ }
+
+out:
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+/* LINTED */
+i915_gem_busy_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_busy *args = data;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ /* Count all active objects as busy, even if they are currently not used
+ * by the gpu. Users of this interface expect objects to eventually
+ * become non-busy without any further actions, therefore emit any
+ * necessary flushes here.
+ */
+ ret = i915_gem_object_flush_active(obj);
+
+ args->busy = obj->active;
+ if (obj->ring) {
+ args->busy |= intel_ring_flag(obj->ring) << 16;
+ }
+
+ drm_gem_object_unreference(&obj->base);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+/* LINTED */
+i915_gem_throttle_ioctl(DRM_IOCTL_ARGS)
+{
+ return i915_gem_ring_throttle(dev, file);
+}
+
+int
+/* LINTED */
+i915_gem_madvise_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_madvise *args = data;
+
+	/* Don't enable buffer caching */
+ args->retained = 0;
+ return 0;
+}
+
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
+{
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->global_list);
+ INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
+
+ obj->ops = ops;
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
+
+ i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .get_pages = i915_gem_object_get_pages_gtt,
+ .put_pages = i915_gem_object_put_pages_gtt,
+};
+
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ int gen;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL)
+ return NULL;
+
+ if (IS_G33(dev))
+ gen = 33;
+ else
+ gen = INTEL_INFO(dev)->gen * 10;
+
+ if (drm_gem_object_init(dev, &obj->base, size, gen) != 0) {
+ kfree(obj, sizeof(*obj));
+ DRM_ERROR("failed to init gem object");
+ return NULL;
+ }
+
+ i915_gem_object_init(obj, &i915_gem_object_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+ if (HAS_LLC(dev)) {
+ /* On Gen6, we can have the GPU use the LLC (the CPU
+ * cache) for about a 10% performance improvement
+ * compared to uncached. Graphics requests other than
+ * display scanout are coherent with the CPU in
+ * accessing this cache. This means in this mode we
+ * don't need to clflush on the CPU side, and on the
+ * GPU side we only need to flush internal caches to
+ * get data visible to the CPU.
+ *
+ * However, we maintain the display planes as UC, and so
+ * need to rebind when first used as such.
+ */
+ obj->cache_level = I915_CACHE_LLC;
+ } else
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ DRM_ERROR("i915_gem_init_object is not supported, BUG!");
+ return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (obj->phys_obj)
+ i915_gem_detach_phys_object(dev, obj);
+
+ obj->pin_count = 0;
+ ret = i915_gem_object_unbind(obj, 1);
+ if (ret) {
+ bool was_interruptible;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ WARN_ON(i915_gem_object_unbind(obj, 1));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
+ /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
+ * before progressing. */
+ if (obj->stolen)
+ i915_gem_object_unpin_pages(obj);
+
+ if (obj->pages_pin_count)
+ obj->pages_pin_count = 0;
+ i915_gem_object_put_pages(obj);
+ if (obj->mmap_offset)
+ i915_gem_free_mmap_offset(obj);
+
+// if (obj->base.import_attach)
+// drm_prime_gem_destroy(&obj->base, NULL);
+
+ i915_gem_info_remove_obj(dev_priv, obj->base.size);
+
+ if (obj->bit_17 != NULL)
+ kfree(obj->bit_17, BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * sizeof(long));
+ drm_gem_object_release(&obj->base);
+ kfree(obj, sizeof(*obj));
+}
+
+int
+i915_gem_idle(struct drm_device *dev, uint32_t type)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (dev_priv->mm.suspended) {
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+ }
+
+ ret = i915_gpu_idle(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ i915_gem_retire_requests(dev);
+
+ /* Under UMS, be paranoid and evict. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_gem_evict_everything(dev);
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound mm.suspended!
+ */
+ dev_priv->mm.suspended = 1;
+ del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+
+ i915_kernel_lost_context(dev);
+ i915_gem_cleanup_ringbuffer(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+	/* Cancel the retire work handler; wait for it to finish if running. */
+ del_timer_sync(&dev_priv->mm.retire_timer);
+ cancel_delayed_work(dev_priv->wq);
+
+ return 0;
+}
+
+void i915_gem_l3_remap(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 misccpctl;
+ int i;
+
+ if (!HAS_L3_GPU_CACHE(dev))
+ return;
+
+ if (!dev_priv->l3_parity.remap_info)
+ return;
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+ u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
+ if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
+ DRM_DEBUG("0x%x was already programmed to %x\n",
+ GEN7_L3LOG_BASE + i, remap);
+ if (remap && !dev_priv->l3_parity.remap_info[i/4])
+ DRM_DEBUG_DRIVER("Clearing remapped register\n");
+ I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+ }
+
+ /* Make sure all the writes land before disabling dop clock gating */
+ POSTING_READ(GEN7_L3LOG_BASE);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
+
+void i915_gem_init_swizzling(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 5 ||
+ dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_TILE_SURFACE_SWIZZLING);
+
+ if (IS_GEN5(dev))
+ return;
+
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+ if (IS_GEN6(dev))
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else if (IS_GEN7(dev))
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ /* LINTED */
+ else
+ BUG();
+}
+
+static bool
+intel_enable_blt(struct drm_device *dev)
+{
+ if (!HAS_BLT(dev))
+ return false;
+
+ return true;
+}
+
+static int i915_gem_init_rings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = intel_init_render_ring_buffer(dev);
+ if (ret)
+ return ret;
+
+ if (HAS_BSD(dev)) {
+ ret = intel_init_bsd_ring_buffer(dev);
+ if (ret)
+ goto cleanup_render_ring;
+ }
+
+ if (intel_enable_blt(dev)) {
+ ret = intel_init_blt_ring_buffer(dev);
+ if (ret)
+ goto cleanup_bsd_ring;
+ }
+
+ if (HAS_VEBOX(dev)) {
+ ret = intel_init_vebox_ring_buffer(dev);
+ if (ret)
+ goto cleanup_blt_ring;
+ }
+
+ ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+ if (ret)
+ goto cleanup_vebox_ring;
+
+ return 0;
+
+cleanup_vebox_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+cleanup_blt_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+cleanup_bsd_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+cleanup_render_ring:
+ intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+
+ return ret;
+}
+
+int
+i915_gem_init_hw(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+ I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
+ if (HAS_PCH_NOP(dev)) {
+ u32 temp = I915_READ(GEN7_MSG_CTL);
+ temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+ I915_WRITE(GEN7_MSG_CTL, temp);
+ }
+
+ i915_gem_l3_remap(dev);
+
+ i915_gem_init_swizzling(dev);
+
+ ret = i915_gem_init_rings(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * XXX: There was some w/a described somewhere suggesting loading
+ * contexts before PPGTT.
+ */
+ i915_gem_context_init(dev);
+ if (dev_priv->mm.aliasing_ppgtt) {
+ ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
+ }
+ }
+
+ return 0;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ int size;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (IS_VALLEYVIEW(dev)) {
+ /* VLVA0 (potential hack), BIOS isn't actually waking us */
+ I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
+ if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+ DRM_DEBUG_DRIVER("allow wake ack timed out\n");
+ }
+
+ i915_gem_init_global_gtt(dev);
+
+ size = drm_getfb_size(dev);
+ dev_priv->fbcon_obj = NULL;
+ if (size > 0) {
+ /* save original fb GTT */
+ dev->old_gtt_size = size;
+ dev->old_gtt = kmem_zalloc(dev->old_gtt_size, KM_NOSLEEP);
+ intel_rw_gtt(dev, dev->old_gtt_size,
+ 0, (void *) dev->old_gtt, 0);
+
+ /*
+ * Some BIOSes fail to initialise the GTT, which will cause DMA faults when
+ * the IOMMU is enabled. We need to clear the whole GTT.
+ */
+ i915_clean_gtt(dev, size);
+
+		/* workaround: preallocate fb buffer; make sure the start address is 0 */
+ dev_priv->fbcon_obj = i915_gem_alloc_object(dev, size);
+ if (!dev_priv->fbcon_obj) {
+ DRM_ERROR("failed to allocate framebuffer");
+ mutex_unlock(&dev->struct_mutex);
+ teardown_scratch_page(dev);
+ return (-ENOMEM);
+ }
+
+ /* copy old content to fb buffer */
+ (void) memcpy(dev_priv->fbcon_obj->base.kaddr, dev->old_gtt, size);
+
+ /* Flush everything out, we'll be doing GTT only from now on */
+ ret = intel_pin_and_fence_fb_obj(dev, dev_priv->fbcon_obj, false);
+ if (ret) {
+ DRM_ERROR("failed to pin fb ret %d", ret);
+ mutex_unlock(&dev->struct_mutex);
+ teardown_scratch_page(dev);
+ i915_gem_free_object(&dev_priv->fbcon_obj->base);
+ return ret;
+ }
+ }
+
+ dev_priv->mm.interruptible = true;
+
+ ret = i915_gem_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ return ret;
+ }
+
+ /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->dri1.allow_batchbuffer = 1;
+ return 0;
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ intel_cleanup_ring_buffer(ring);
+}
+
+int
+/* LINTED */
+i915_gem_entervt_ioctl(DRM_IOCTL_ARGS)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+ if (i915_reset_in_progress(&dev_priv->gpu_error)) {
+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
+ atomic_set(&dev_priv->gpu_error.reset_counter, 0);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->mm.suspended = 0;
+
+ ret = i915_gem_init_hw(dev);
+ if (ret != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_ringbuffer;
+
+ return 0;
+
+cleanup_ringbuffer:
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ dev_priv->mm.suspended = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+/* LINTED */
+i915_gem_leavevt_ioctl(DRM_IOCTL_ARGS)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
+	(void) drm_irq_uninstall(dev);
+ return i915_gem_idle(dev, 0);
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+ int ret;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ ret = i915_gem_idle(dev, 1);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+}
+
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+}
+
+void
+i915_gem_load(struct drm_device *dev)
+{
+ int i;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.bound_list);
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ init_ring_lists(&dev_priv->ring[i]);
+ for (i = 0; i < I915_MAX_NUM_FENCES; i++)
+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
+
+ INIT_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler);
+ setup_timer(&dev_priv->mm.retire_timer, i915_gem_retire_work_timer,
+ (void *)dev);
+
+ /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+ if (IS_GEN3(dev)) {
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ }
+
+ dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
+ /* Old X drivers will take 0-2 for front, back, depth buffers */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
+
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
+ dev_priv->num_fence_regs = 32;
+ else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dev_priv->num_fence_regs = 16;
+ else
+ dev_priv->num_fence_regs = 8;
+
+ /* Initialize fence registers to zero */
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ i915_gem_restore_fences(dev);
+
+ i915_gem_detect_bit_6_swizzle(dev);
+ DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue, DRM_INTR_PRI(dev));
+ dev_priv->mm.interruptible = true;
+}
+
+/*
+ * Create a physically contiguous memory object for this object
+ * e.g. for cursor + overlay regs
+ */
+static int i915_gem_init_phys_object(struct drm_device *dev,
+ int id, int size, int align)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_phys_object *phys_obj;
+ int ret;
+
+ if (dev_priv->mm.phys_objs[id - 1] || !size)
+ return 0;
+
+ phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+ if (!phys_obj)
+ return -ENOMEM;
+
+ phys_obj->id = id;
+
+ phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff, 1);
+ if (!phys_obj->handle) {
+ ret = -ENOMEM;
+ goto kfree_obj;
+ }
+
+ dev_priv->mm.phys_objs[id - 1] = phys_obj;
+
+ return 0;
+kfree_obj:
+ kfree(phys_obj, sizeof (struct drm_i915_gem_phys_object));
+ return ret;
+}
+
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_phys_object *phys_obj;
+
+ if (!dev_priv->mm.phys_objs[id - 1])
+ return;
+
+ phys_obj = dev_priv->mm.phys_objs[id - 1];
+ if (phys_obj->cur_obj) {
+ i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
+ }
+
+ drm_pci_free(phys_obj->handle);
+ kfree(phys_obj, sizeof (struct drm_i915_gem_phys_object));
+ dev_priv->mm.phys_objs[id - 1] = NULL;
+}
+
+void i915_gem_free_all_phys_object(struct drm_device *dev)
+{
+ int i;
+
+ for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
+ i915_gem_free_phys_object(dev, i);
+}
+
+void i915_gem_detach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj)
+{
+ int i, ret;
+ int page_count;
+
+ if (!obj->phys_obj)
+ return;
+
+ if (!obj->page_list) {
+ ret = i915_gem_object_get_pages_gtt(obj);
+ if (ret)
+ goto out;
+ }
+
+ page_count = obj->base.size / PAGE_SIZE;
+
+ for (i = 0; i < page_count; i++) {
+ char *dst = obj->page_list[i];
+ char *src = (caddr_t)(obj->phys_obj->handle->vaddr + (i * PAGE_SIZE));
+
+ (void) memcpy(dst, src, PAGE_SIZE);
+ }
+ drm_clflush_pages(obj->page_list, page_count);
+ i915_gem_chipset_flush(dev);
+
+ i915_gem_object_put_pages_gtt(obj);
+out:
+ obj->phys_obj->cur_obj = NULL;
+ obj->phys_obj = NULL;
+}
+
+int
+i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ int id,
+ int align)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = 0;
+ int page_count;
+ int i;
+
+ if (id > I915_MAX_PHYS_OBJECT)
+ return -EINVAL;
+
+ if (obj->phys_obj) {
+ if (obj->phys_obj->id == id)
+ return 0;
+ i915_gem_detach_phys_object(dev, obj);
+ }
+
+ /* create a new object */
+ if (!dev_priv->mm.phys_objs[id - 1]) {
+ ret = i915_gem_init_phys_object(dev, id,
+ obj->base.size, align);
+ if (ret) {
+ DRM_ERROR("failed to init phys object %d size: %lu\n", id, obj->base.size);
+ goto out;
+ }
+ }
+
+ /* bind to the object */
+ obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+ obj->phys_obj->cur_obj = obj;
+
+ if (!obj->page_list) {
+ ret = i915_gem_object_get_pages_gtt(obj);
+ if (ret) {
+ DRM_ERROR("failed to get page list\n");
+ goto out;
+ }
+ }
+
+ page_count = obj->base.size / PAGE_SIZE;
+
+ for (i = 0; i < page_count; i++) {
+		/* Copy the object's pages into the phys object. */
+		char *src = obj->page_list[i];
+		char *dst = (caddr_t)(obj->phys_obj->handle->vaddr + (i * PAGE_SIZE));
+
+		(void) memcpy(dst, src, PAGE_SIZE);
+ }
+
+ i915_gem_object_put_pages_gtt(obj);
+
+ return 0;
+out:
+ return ret;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ /* LINTED */
+ struct drm_file *file_priv)
+{
+ void *obj_addr;
+ int ret;
+ char __user *user_data;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ obj_addr = (void *)(uintptr_t)(obj->phys_obj->handle->vaddr + args->offset);
+
+ DRM_DEBUG("obj_addr %p, %ld\n", obj_addr, args->size);
+ ret = DRM_COPY_FROM_USER(obj_addr, user_data, args->size);
+ if (ret)
+ return -EFAULT;
+
+ i915_gem_chipset_flush(dev);
+ return 0;
+}
+
+void i915_gem_release(struct drm_device * dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ file_priv->status = 0;
+
+ mutex_lock(&dev->struct_mutex);
+	/* i915_gpu_idle() generates a warning message; just ignore the return value */
+ (void) i915_gpu_idle(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* Clean up our request list when the client is going away, so that
+ * later retire_requests won't dereference our soon-to-be-gone
+ * file_priv.
+ */
+ spin_lock(&file_priv->mm.lock);
+ while (!list_empty(&file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ }
+ spin_unlock(&file_priv->mm.lock);
+}
diff --git a/usr/src/uts/intel/io/i915/i915_gem_context.c b/usr/src/uts/intel/io/i915/i915_gem_context.c
new file mode 100644
index 0000000..55bc0a7
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_gem_context.c
@@ -0,0 +1,608 @@
+/*
+ * Copyright © 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+/*
+ * Copyright (c) 2013, Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * This file implements HW context support. On gen5+ a HW context consists of an
+ * opaque GPU object which is referenced at times of context saves and restores.
+ * With RC6 enabled, the context is also referenced as the GPU enters and exits
+ * RC6 (the GPU has its own internal power context, except on gen5). Though
+ * something like a context does exist for the media ring, the code only
+ * supports contexts for the render ring.
+ *
+ * In software, there is a distinction between contexts created by the user,
+ * and the default HW context. The default HW context is used by GPU clients
+ * that do not request setup of their own hardware context. The default
+ * context's state is never restored to help prevent programming errors. This
+ * would happen if a client ran and piggy-backed off another client's GPU state.
+ * The default context only exists to give the GPU some offset to load as the
+ * current to invoke a save of the context we actually care about. In fact, the
+ * code could likely be constructed, albeit in a more complicated fashion, to
+ * never use the default context, though that limits the driver's ability to
+ * swap out, and/or destroy other contexts.
+ *
+ * All other contexts are created as a request by the GPU client. These contexts
+ * store GPU state, and thus allow GPU clients to not re-emit state (and
+ * potentially query certain state) at any time. The kernel driver makes
+ * certain that the appropriate commands are inserted.
+ *
+ * The context life cycle is semi-complicated in that context BOs may live
+ * longer than the context itself because of the way the hardware, and object
+ * tracking works. Below is a very crude representation of the state machine
+ * describing the context life.
+ * refcount pincount active
+ * S0: initial state 0 0 0
+ * S1: context created 1 0 0
+ * S2: context is currently running 2 1 X
+ * S3: GPU referenced, but not current 2 0 1
+ * S4: context is current, but destroyed 1 1 0
+ * S5: like S3, but destroyed 1 0 1
+ *
+ * The most common (but not all) transitions:
+ * S0->S1: client creates a context
+ * S1->S2: client submits execbuf with context
+ * S2->S3: other clients submits execbuf with context
+ * S3->S1: context object was retired
+ * S3->S2: clients submits another execbuf
+ * S2->S4: context destroy called with current context
+ * S3->S5->S0: destroy path
+ * S4->S5->S0: destroy path on current context
+ *
+ * There are two confusing terms used above:
+ * The "current context" means the context which is currently running on the
+ * GPU. The GPU has loaded its state already and has stored away the gtt
+ * offset of the BO. The GPU is not actively referencing the data at this
+ * offset, but it will on the next context switch. The only way to avoid this
+ * is to do a GPU reset.
+ *
+ * An "active context' is one which was previously the "current context" and is
+ * on the active list waiting for the next context switch to occur. Until this
+ * happens, the object must remain at the same gtt offset. It is therefore
+ * possible to destroy a context, but it is still active.
+ *
+ */
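+
+/*
+ * Illustrative sketch (not part of this import): the S0->S1->S2->S4
+ * transitions above, driven from userspace through the context ioctls in
+ * i915_drm.h.  Error handling is elided and `execbuf' is assumed to be an
+ * otherwise initialized struct drm_i915_gem_execbuffer2:
+ *
+ *	struct drm_i915_gem_context_create create = { 0, 0 };
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);	    S0->S1
+ *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);	    S1->S2
+ *	struct drm_i915_gem_context_destroy destroy = { create.ctx_id, 0 };
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);   S2->S4
+ */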
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* This is a HW constraint. The value below is the largest known requirement
+ * I've seen in a spec to date, and that was a workaround for a non-shipping
+ * part. It should be safe to decrease this, but it's more future proof as is.
+ */
+#define CONTEXT_ALIGN (64<<10)
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
+static int do_switch(struct i915_hw_context *to);
+
+static int get_context_size(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+ u32 reg;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ reg = I915_READ(CXT_SIZE);
+ ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
+ break;
+ case 7:
+ reg = I915_READ(GEN7_CXT_SIZE);
+ if (IS_HASWELL(dev))
+ ret = HSW_CXT_TOTAL_SIZE;
+ else
+ ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
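+
+/*
+ * Worked example (illustrative): the "* 64" above suggests the CXT_SIZE
+ * registers report the context size in 64-byte units.  If the gen6
+ * register decoded to, say, 280 such units, get_context_size() would
+ * return 280 * 64 = 17920 bytes, which i915_gem_context_init() below
+ * rounds up to whole pages via ptob(DIV_ROUND_UP(..., 4096)).
+ */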
+
+void i915_gem_context_free(struct kref *ctx_ref)
+{
+ struct i915_hw_context *ctx = container_of(ctx_ref,
+ struct i915_hw_context, ref);
+
+ drm_gem_object_unreference(&ctx->obj->base);
+ kfree(ctx, sizeof(*ctx));
+}
+
+static struct i915_hw_context *
+create_hw_context(struct drm_device *dev,
+ struct drm_i915_file_private *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_context *ctx;
+ int ret, id;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ return NULL;
+
+ kref_init(&ctx->ref);
+ ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+ if (ctx->obj == NULL) {
+ kfree(ctx, sizeof(*ctx));
+		DRM_DEBUG_DRIVER("Context object allocation failed\n");
+ return NULL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 7) {
+ ret = i915_gem_object_set_cache_level(ctx->obj,
+ I915_CACHE_LLC_MLC);
+ if (ret)
+ goto err_out;
+ }
+
+ /* The ring associated with the context object is handled by the normal
+	 * object tracking code. We give an initial ring value simply to pass an
+ * assertion in the context switch code.
+ */
+ ctx->ring = &dev_priv->ring[RCS];
+
+ /* Default context will never have a file_priv */
+ if (file_priv == NULL)
+ return ctx;
+
+again:
+ if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ DRM_DEBUG_DRIVER("idr allocation failed\n");
+ goto err_out;
+ }
+
+ ret = idr_get_new_above(&file_priv->context_idr, ctx,
+ DEFAULT_CONTEXT_ID + 1, &id);
+ ctx->file_priv = file_priv;
+ if (ret == 0)
+ ctx->id = id;
+
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ goto err_out;
+
+ return ctx;
+
+err_out:
+ i915_gem_context_unreference(ctx);
+ return NULL;
+}
+
+static inline bool is_default_context(struct i915_hw_context *ctx)
+{
+ return (ctx == ctx->ring->default_context);
+}
+
+/**
+ * The default context needs to exist per ring that uses contexts. It stores the
+ * context state of the GPU for applications that don't utilize HW contexts, as
+ * well as an idle case.
+ */
+static int create_default_context(struct drm_i915_private *dev_priv)
+{
+ struct i915_hw_context *ctx;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+ ctx = create_hw_context(dev_priv->dev, NULL);
+ if (ctx == NULL)
+ return (-ENOMEM);
+
+ /* We may need to do things with the shrinker which require us to
+ * immediately switch back to the default context. This can cause a
+ * problem as pinning the default context also requires GTT space which
+ * may not be available. To avoid this we always pin the
+ * default context.
+ */
+ dev_priv->ring[RCS].default_context = ctx;
+ ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+ goto err_destroy;
+ }
+
+ ret = do_switch(ctx);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
+ goto err_unpin;
+ }
+
+ DRM_DEBUG_DRIVER("Default HW context loaded\n");
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(ctx->obj);
+err_destroy:
+ i915_gem_context_unreference(ctx);
+ return ret;
+}
+
+void i915_gem_context_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_HW_CONTEXTS(dev)) {
+ dev_priv->hw_contexts_disabled = true;
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
+ return;
+ }
+
+ /* If called from reset, or thaw... we've been here already */
+ if (dev_priv->hw_contexts_disabled ||
+ dev_priv->ring[RCS].default_context)
+ return;
+
+ dev_priv->hw_context_size = ptob(DIV_ROUND_UP(get_context_size(dev), 4096));
+
+ if (dev_priv->hw_context_size > (1<<20)) {
+ dev_priv->hw_contexts_disabled = true;
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
+ return;
+ }
+
+ if (create_default_context(dev_priv)) {
+ dev_priv->hw_contexts_disabled = true;
+ DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("HW context support initialized\n");
+}
+
+void i915_gem_context_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+
+ if (dev_priv->hw_contexts_disabled)
+ return;
+
+ /* The only known way to stop the gpu from accessing the hw context is
+ * to reset it. Do this as the very last operation to avoid confusing
+ * other code, leading to spurious errors. */
+ intel_gpu_reset(dev);
+
+ i915_gem_object_unpin(dctx->obj);
+
+ /* When default context is created and switched to, base object refcount
+ * will be 2 (+1 from object creation and +1 from do_switch()).
+ * i915_gem_context_fini() will be called after gpu_idle() has switched
+ * to default context. So we need to unreference the base object once
+ * to offset the do_switch part, so that i915_gem_context_unreference()
+ * can then free the base object correctly. */
+ drm_gem_object_unreference(&dctx->obj->base);
+ i915_gem_context_unreference(dctx);
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+ struct i915_hw_context *ctx = p;
+
+ BUG_ON(id == DEFAULT_CONTEXT_ID);
+
+ i915_gem_context_unreference(ctx);
+ return 0;
+}
+
+struct i915_ctx_hang_stats *
+i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ u32 id)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_context *to;
+
+ if (dev_priv->hw_contexts_disabled)
+ return NULL;
+
+ if (ring->id != RCS)
+ return NULL;
+
+ if (file == NULL)
+ return NULL;
+
+ if (id == DEFAULT_CONTEXT_ID)
+ return &file_priv->hang_stats;
+
+ to = i915_gem_context_get(file->driver_priv, id);
+ if (to == NULL)
+ return NULL;
+
+ return &to->hang_stats;
+}
+
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ mutex_lock(&dev->struct_mutex);
+ idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ idr_destroy(&file_priv->context_idr);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
+{
+ return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+}
+
+static inline int
+mi_set_context(struct intel_ring_buffer *ring,
+ struct i915_hw_context *new_context,
+ u32 hw_flags)
+{
+ int ret;
+
+ /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
+ * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
+ * explicitly, so we rely on the value at ring init, stored in
+ * itlb_before_ctx_switch.
+ */
+ if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
+ if (IS_GEN7(ring->dev))
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, new_context->obj->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ hw_flags);
+ /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+ intel_ring_emit(ring, MI_NOOP);
+
+ if (IS_GEN7(ring->dev))
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return ret;
+}
+
+static int do_switch(struct i915_hw_context *to)
+{
+ struct intel_ring_buffer *ring = to->ring;
+ struct i915_hw_context *from = ring->last_context;
+ u32 hw_flags = 0;
+ int ret;
+
+ BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+
+ if (from == to)
+ return 0;
+
+ ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+ if (ret)
+ return ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+ * that thanks to write = false in this call and us not setting any gpu
+ * write domains when putting a context object onto the active list
+ * (when switching away from it), this won't block.
+ * XXX: We need a real interface to do this instead of trickery. */
+ ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ if (ret) {
+ i915_gem_object_unpin(to->obj);
+ return ret;
+ }
+
+ if (!to->obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+
+ if (!to->is_initialized || is_default_context(to))
+ hw_flags |= MI_RESTORE_INHIBIT;
+ else if (from == to) {
+ /* not yet expected */
+ hw_flags |= MI_FORCE_RESTORE;
+ WARN_ON(from == to);
+ }
+
+ ret = mi_set_context(ring, to, hw_flags);
+ if (ret) {
+ i915_gem_object_unpin(to->obj);
+ return ret;
+ }
+
+ /* The backing object for the context is done after switching to the
+ * *next* context. Therefore we cannot retire the previous context until
+ * the next context has already started running. In fact, the below code
+ * is a bit suboptimal because the retiring can occur simply after the
+ * MI_SET_CONTEXT instead of when the next seqno has completed.
+ */
+ if (from != NULL) {
+ from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_gem_object_move_to_active(from->obj, ring);
+		/* As long as MI_SET_CONTEXT is serializing, i.e. it flushes the
+ * whole damn pipeline, we don't need to explicitly mark the
+ * object dirty. The only exception is that the context must be
+ * correct in case the object gets swapped out. Ideally we'd be
+ * able to defer doing this until we know the object would be
+ * swapped, but there is no way to do that yet.
+ */
+ from->obj->dirty = 1;
+ BUG_ON(from->obj->ring != ring);
+
+ ret = i915_add_request(ring, NULL);
+ if (ret) {
+ /* Too late, we've already scheduled a context switch.
+ * Try to undo the change so that the hw state is
+			 * consistent with our tracking. In case of emergency,
+ * scream.
+ */
+ WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
+ return ret;
+ }
+
+ i915_gem_object_unpin(from->obj);
+ i915_gem_context_unreference(from);
+ }
+
+ i915_gem_context_reference(to);
+ ring->last_context = to;
+ to->is_initialized = true;
+
+ return 0;
+}
+
+/**
+ * i915_switch_context() - perform a GPU context switch.
+ * @ring: ring for which we'll execute the context switch
+ * @file: file associated with the context, may be NULL
+ * @to_id: id of the context to switch to
+ *
+ * The context life cycle is simple. The context refcount is incremented and
+ * decremented by 1 on create and destroy. If the context is in use by the GPU,
+ * it will have a refcount > 1. This allows us to destroy the context abstract
+ * object while letting the normal object tracking destroy the backing BO.
+ */
+int i915_switch_context(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ int to_id)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_hw_context *to;
+
+ if (dev_priv->hw_contexts_disabled)
+ return 0;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+ if (ring != &dev_priv->ring[RCS])
+ return 0;
+
+ if (to_id == DEFAULT_CONTEXT_ID) {
+ to = ring->default_context;
+ } else {
+ if (file == NULL)
+ return -EINVAL;
+
+ to = i915_gem_context_get(file->driver_priv, to_id);
+ if (to == NULL)
+ return -ENOENT;
+ }
+
+ return do_switch(to);
+}
+
+int i915_gem_context_create_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_context_create *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_context *ctx;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ if (dev_priv->hw_contexts_disabled)
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = create_hw_context(dev, file_priv);
+ mutex_unlock(&dev->struct_mutex);
+ if (ctx == NULL)
+ return (-ENOMEM);
+
+ args->ctx_id = ctx->id;
+ DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
+
+ return 0;
+}
+
+int i915_gem_context_destroy_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_context_destroy *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_context *ctx;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (!ctx) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOENT;
+ }
+
+ idr_remove(&ctx->file_priv->context_idr, ctx->id);
+ i915_gem_context_unreference(ctx);
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
+ return 0;
+}
+
+void i915_gem_context_reference(struct i915_hw_context *ctx)
+{
+ kref_get(&ctx->ref);
+}
+
+void i915_gem_context_unreference(struct i915_hw_context *ctx)
+{
+ kref_put(&ctx->ref, i915_gem_context_free);
+}
diff --git a/usr/src/uts/intel/io/i915/i915_gem_debug.c b/usr/src/uts/intel/io/i915/i915_gem_debug.c
new file mode 100644
index 0000000..23f88bf
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_gem_debug.c
@@ -0,0 +1,952 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+int gpu_dump = B_TRUE;
+int gpu_panic_on_hang = B_FALSE;
+
+#define BUFFER_FAIL(_count, _len, _name) { \
+ DRM_ERROR("Buffer size too small in %s (%d < %d)", \
+ (_name), (_count), (_len)); \
+ (*failures)++; \
+ return count; \
+}
+
+
+static uint32_t saved_s2 = 0, saved_s4 = 0;
+static char saved_s2_set = 0, saved_s4_set = 0;
+
+
+#define MAX_INSTDONE_BITS 100
+
+struct instdone_bit {
+ uint32_t reg;
+ uint32_t bit;
+ const char *name;
+};
+
+struct instdone_bit instdone_bits[MAX_INSTDONE_BITS];
+int num_instdone_bits = 0;
+
+static void
+add_instdone_bit(uint32_t reg, uint32_t bit, const char *name)
+{
+ instdone_bits[num_instdone_bits].reg = reg;
+ instdone_bits[num_instdone_bits].bit = bit;
+ instdone_bits[num_instdone_bits].name = name;
+ num_instdone_bits++;
+}
+
+static void
+gen3_instdone_bit(uint32_t bit, const char *name)
+{
+ add_instdone_bit(INST_DONE, bit, name);
+}
+
+static void
+gen4_instdone_bit(uint32_t bit, const char *name)
+{
+ add_instdone_bit(INST_DONE_I965, bit, name);
+}
+
+static void
+gen4_instdone1_bit(uint32_t bit, const char *name)
+{
+ add_instdone_bit(INST_DONE_1, bit, name);
+}
+
+static void
+gen6_instdone1_bit(uint32_t bit, const char *name)
+{
+ add_instdone_bit(GEN6_INSTDONE_1, bit, name);
+}
+
+static void
+gen6_instdone2_bit(uint32_t bit, const char *name)
+{
+ add_instdone_bit(GEN6_INSTDONE_2, bit, name);
+}
+
+static void
+init_g965_instdone1(void)
+{
+ gen4_instdone1_bit(I965_GW_CS_DONE_CR, "GW CS CR");
+ gen4_instdone1_bit(I965_SVSM_CS_DONE_CR, "SVSM CS CR");
+ gen4_instdone1_bit(I965_SVDW_CS_DONE_CR, "SVDW CS CR");
+ gen4_instdone1_bit(I965_SVDR_CS_DONE_CR, "SVDR CS CR");
+ gen4_instdone1_bit(I965_SVRW_CS_DONE_CR, "SVRW CS CR");
+ gen4_instdone1_bit(I965_SVRR_CS_DONE_CR, "SVRR CS CR");
+ gen4_instdone1_bit(I965_SVTW_CS_DONE_CR, "SVTW CS CR");
+ gen4_instdone1_bit(I965_MASM_CS_DONE_CR, "MASM CS CR");
+ gen4_instdone1_bit(I965_MASF_CS_DONE_CR, "MASF CS CR");
+ gen4_instdone1_bit(I965_MAW_CS_DONE_CR, "MAW CS CR");
+ gen4_instdone1_bit(I965_EM1_CS_DONE_CR, "EM1 CS CR");
+ gen4_instdone1_bit(I965_EM0_CS_DONE_CR, "EM0 CS CR");
+ gen4_instdone1_bit(I965_UC1_CS_DONE, "UC1 CS");
+ gen4_instdone1_bit(I965_UC0_CS_DONE, "UC0 CS");
+ gen4_instdone1_bit(I965_URB_CS_DONE, "URB CS");
+ gen4_instdone1_bit(I965_ISC_CS_DONE, "ISC CS");
+ gen4_instdone1_bit(I965_CL_CS_DONE, "CL CS");
+ gen4_instdone1_bit(I965_GS_CS_DONE, "GS CS");
+ gen4_instdone1_bit(I965_VS0_CS_DONE, "VS0 CS");
+ gen4_instdone1_bit(I965_VF_CS_DONE, "VF CS");
+}
+
+static void
+init_g4x_instdone1(void)
+{
+ gen4_instdone1_bit(G4X_BCS_DONE, "BCS");
+ gen4_instdone1_bit(G4X_CS_DONE, "CS");
+ gen4_instdone1_bit(G4X_MASF_DONE, "MASF");
+ gen4_instdone1_bit(G4X_SVDW_DONE, "SVDW");
+ gen4_instdone1_bit(G4X_SVDR_DONE, "SVDR");
+ gen4_instdone1_bit(G4X_SVRW_DONE, "SVRW");
+ gen4_instdone1_bit(G4X_SVRR_DONE, "SVRR");
+ gen4_instdone1_bit(G4X_ISC_DONE, "ISC");
+ gen4_instdone1_bit(G4X_MT_DONE, "MT");
+ gen4_instdone1_bit(G4X_RC_DONE, "RC");
+ gen4_instdone1_bit(G4X_DAP_DONE, "DAP");
+ gen4_instdone1_bit(G4X_MAWB_DONE, "MAWB");
+ gen4_instdone1_bit(G4X_MT_IDLE, "MT idle");
+ //gen4_instdone1_bit(G4X_GBLT_BUSY, "GBLT");
+ gen4_instdone1_bit(G4X_SVSM_DONE, "SVSM");
+ gen4_instdone1_bit(G4X_MASM_DONE, "MASM");
+ gen4_instdone1_bit(G4X_QC_DONE, "QC");
+ gen4_instdone1_bit(G4X_FL_DONE, "FL");
+ gen4_instdone1_bit(G4X_SC_DONE, "SC");
+ gen4_instdone1_bit(G4X_DM_DONE, "DM");
+ gen4_instdone1_bit(G4X_FT_DONE, "FT");
+ gen4_instdone1_bit(G4X_DG_DONE, "DG");
+ gen4_instdone1_bit(G4X_SI_DONE, "SI");
+ gen4_instdone1_bit(G4X_SO_DONE, "SO");
+ gen4_instdone1_bit(G4X_PL_DONE, "PL");
+ gen4_instdone1_bit(G4X_WIZ_DONE, "WIZ");
+ gen4_instdone1_bit(G4X_URB_DONE, "URB");
+ gen4_instdone1_bit(G4X_SF_DONE, "SF");
+ gen4_instdone1_bit(G4X_CL_DONE, "CL");
+ gen4_instdone1_bit(G4X_GS_DONE, "GS");
+ gen4_instdone1_bit(G4X_VS0_DONE, "VS0");
+ gen4_instdone1_bit(G4X_VF_DONE, "VF");
+}
+
+void
+init_instdone_definitions(struct drm_device *dev)
+{
+ if (IS_GEN6(dev)) {
+ /* Now called INSTDONE_1 in the docs. */
+ gen6_instdone1_bit(GEN6_MA_3_DONE, "Message Arbiter 3");
+ gen6_instdone1_bit(GEN6_EU_32_DONE, "EU 32");
+ gen6_instdone1_bit(GEN6_EU_31_DONE, "EU 31");
+ gen6_instdone1_bit(GEN6_EU_30_DONE, "EU 30");
+ gen6_instdone1_bit(GEN6_MA_3_DONE, "Message Arbiter 2");
+ gen6_instdone1_bit(GEN6_EU_22_DONE, "EU 22");
+ gen6_instdone1_bit(GEN6_EU_21_DONE, "EU 21");
+ gen6_instdone1_bit(GEN6_EU_20_DONE, "EU 20");
+ gen6_instdone1_bit(GEN6_MA_3_DONE, "Message Arbiter 1");
+ gen6_instdone1_bit(GEN6_EU_12_DONE, "EU 12");
+ gen6_instdone1_bit(GEN6_EU_11_DONE, "EU 11");
+ gen6_instdone1_bit(GEN6_EU_10_DONE, "EU 10");
+ gen6_instdone1_bit(GEN6_MA_3_DONE, "Message Arbiter 0");
+ gen6_instdone1_bit(GEN6_EU_02_DONE, "EU 02");
+ gen6_instdone1_bit(GEN6_EU_01_DONE, "EU 01");
+ gen6_instdone1_bit(GEN6_EU_00_DONE, "EU 00");
+
+ gen6_instdone1_bit(GEN6_IC_3_DONE, "IC 3");
+ gen6_instdone1_bit(GEN6_IC_2_DONE, "IC 2");
+ gen6_instdone1_bit(GEN6_IC_1_DONE, "IC 1");
+ gen6_instdone1_bit(GEN6_IC_0_DONE, "IC 0");
+ gen6_instdone1_bit(GEN6_ISC_10_DONE, "ISC 1/0");
+ gen6_instdone1_bit(GEN6_ISC_32_DONE, "ISC 3/2");
+
+ gen6_instdone1_bit(GEN6_VSC_DONE, "VSC");
+ gen6_instdone1_bit(GEN6_IEF_DONE, "IEF");
+ gen6_instdone1_bit(GEN6_VFE_DONE, "VFE");
+ gen6_instdone1_bit(GEN6_TD_DONE, "TD");
+ gen6_instdone1_bit(GEN6_TS_DONE, "TS");
+ gen6_instdone1_bit(GEN6_GW_DONE, "GW");
+ gen6_instdone1_bit(GEN6_HIZ_DONE, "HIZ");
+ gen6_instdone1_bit(GEN6_AVS_DONE, "AVS");
+
+ /* Now called INSTDONE_2 in the docs. */
+ gen6_instdone2_bit(GEN6_GAM_DONE, "GAM");
+ gen6_instdone2_bit(GEN6_CS_DONE, "CS");
+ gen6_instdone2_bit(GEN6_WMBE_DONE, "WMBE");
+ gen6_instdone2_bit(GEN6_SVRW_DONE, "SVRW");
+ gen6_instdone2_bit(GEN6_RCC_DONE, "RCC");
+ gen6_instdone2_bit(GEN6_SVG_DONE, "SVG");
+ gen6_instdone2_bit(GEN6_ISC_DONE, "ISC");
+ gen6_instdone2_bit(GEN6_MT_DONE, "MT");
+ gen6_instdone2_bit(GEN6_RCPFE_DONE, "RCPFE");
+ gen6_instdone2_bit(GEN6_RCPBE_DONE, "RCPBE");
+ gen6_instdone2_bit(GEN6_VDI_DONE, "VDI");
+ gen6_instdone2_bit(GEN6_RCZ_DONE, "RCZ");
+ gen6_instdone2_bit(GEN6_DAP_DONE, "DAP");
+ gen6_instdone2_bit(GEN6_PSD_DONE, "PSD");
+ gen6_instdone2_bit(GEN6_IZ_DONE, "IZ");
+ gen6_instdone2_bit(GEN6_WMFE_DONE, "WMFE");
+ gen6_instdone2_bit(GEN6_SVSM_DONE, "SVSM");
+ gen6_instdone2_bit(GEN6_QC_DONE, "QC");
+ gen6_instdone2_bit(GEN6_FL_DONE, "FL");
+ gen6_instdone2_bit(GEN6_SC_DONE, "SC");
+ gen6_instdone2_bit(GEN6_DM_DONE, "DM");
+ gen6_instdone2_bit(GEN6_FT_DONE, "FT");
+ gen6_instdone2_bit(GEN6_DG_DONE, "DG");
+ gen6_instdone2_bit(GEN6_SI_DONE, "SI");
+ gen6_instdone2_bit(GEN6_SO_DONE, "SO");
+ gen6_instdone2_bit(GEN6_PL_DONE, "PL");
+ gen6_instdone2_bit(GEN6_VME_DONE, "VME");
+ gen6_instdone2_bit(GEN6_SF_DONE, "SF");
+ gen6_instdone2_bit(GEN6_CL_DONE, "CL");
+ gen6_instdone2_bit(GEN6_GS_DONE, "GS");
+ gen6_instdone2_bit(GEN6_VS0_DONE, "VS0");
+ gen6_instdone2_bit(GEN6_VF_DONE, "VF");
+ } else if (IS_GEN5(dev)) {
+ gen4_instdone_bit(ILK_ROW_0_EU_0_DONE, "Row 0, EU 0");
+ gen4_instdone_bit(ILK_ROW_0_EU_1_DONE, "Row 0, EU 1");
+ gen4_instdone_bit(ILK_ROW_0_EU_2_DONE, "Row 0, EU 2");
+ gen4_instdone_bit(ILK_ROW_0_EU_3_DONE, "Row 0, EU 3");
+ gen4_instdone_bit(ILK_ROW_1_EU_0_DONE, "Row 1, EU 0");
+ gen4_instdone_bit(ILK_ROW_1_EU_1_DONE, "Row 1, EU 1");
+ gen4_instdone_bit(ILK_ROW_1_EU_2_DONE, "Row 1, EU 2");
+ gen4_instdone_bit(ILK_ROW_1_EU_3_DONE, "Row 1, EU 3");
+ gen4_instdone_bit(ILK_ROW_2_EU_0_DONE, "Row 2, EU 0");
+ gen4_instdone_bit(ILK_ROW_2_EU_1_DONE, "Row 2, EU 1");
+ gen4_instdone_bit(ILK_ROW_2_EU_2_DONE, "Row 2, EU 2");
+ gen4_instdone_bit(ILK_ROW_2_EU_3_DONE, "Row 2, EU 3");
+ gen4_instdone_bit(ILK_VCP_DONE, "VCP");
+ gen4_instdone_bit(ILK_ROW_0_MATH_DONE, "Row 0 math");
+ gen4_instdone_bit(ILK_ROW_1_MATH_DONE, "Row 1 math");
+ gen4_instdone_bit(ILK_ROW_2_MATH_DONE, "Row 2 math");
+ gen4_instdone_bit(ILK_VC1_DONE, "VC1");
+ gen4_instdone_bit(ILK_ROW_0_MA_DONE, "Row 0 MA");
+ gen4_instdone_bit(ILK_ROW_1_MA_DONE, "Row 1 MA");
+ gen4_instdone_bit(ILK_ROW_2_MA_DONE, "Row 2 MA");
+ gen4_instdone_bit(ILK_ROW_0_ISC_DONE, "Row 0 ISC");
+ gen4_instdone_bit(ILK_ROW_1_ISC_DONE, "Row 1 ISC");
+ gen4_instdone_bit(ILK_ROW_2_ISC_DONE, "Row 2 ISC");
+ gen4_instdone_bit(ILK_VFE_DONE, "VFE");
+ gen4_instdone_bit(ILK_TD_DONE, "TD");
+ gen4_instdone_bit(ILK_SVTS_DONE, "SVTS");
+ gen4_instdone_bit(ILK_TS_DONE, "TS");
+ gen4_instdone_bit(ILK_GW_DONE, "GW");
+ gen4_instdone_bit(ILK_AI_DONE, "AI");
+ gen4_instdone_bit(ILK_AC_DONE, "AC");
+ gen4_instdone_bit(ILK_AM_DONE, "AM");
+
+ init_g4x_instdone1();
+ } else if (IS_GEN4(dev)) {
+ gen4_instdone_bit(I965_ROW_0_EU_0_DONE, "Row 0, EU 0");
+ gen4_instdone_bit(I965_ROW_0_EU_1_DONE, "Row 0, EU 1");
+ gen4_instdone_bit(I965_ROW_0_EU_2_DONE, "Row 0, EU 2");
+ gen4_instdone_bit(I965_ROW_0_EU_3_DONE, "Row 0, EU 3");
+ gen4_instdone_bit(I965_ROW_1_EU_0_DONE, "Row 1, EU 0");
+ gen4_instdone_bit(I965_ROW_1_EU_1_DONE, "Row 1, EU 1");
+ gen4_instdone_bit(I965_ROW_1_EU_2_DONE, "Row 1, EU 2");
+ gen4_instdone_bit(I965_ROW_1_EU_3_DONE, "Row 1, EU 3");
+ gen4_instdone_bit(I965_SF_DONE, "Strips and Fans");
+ gen4_instdone_bit(I965_SE_DONE, "Setup Engine");
+ gen4_instdone_bit(I965_WM_DONE, "Windowizer");
+ gen4_instdone_bit(I965_DISPATCHER_DONE, "Dispatcher");
+ gen4_instdone_bit(I965_PROJECTION_DONE, "Projection and LOD");
+ gen4_instdone_bit(I965_DG_DONE, "Dependent address generator");
+ gen4_instdone_bit(I965_QUAD_CACHE_DONE, "Texture fetch");
+ gen4_instdone_bit(I965_TEXTURE_FETCH_DONE, "Texture fetch");
+ gen4_instdone_bit(I965_TEXTURE_DECOMPRESS_DONE, "Texture decompress");
+ gen4_instdone_bit(I965_SAMPLER_CACHE_DONE, "Sampler cache");
+ gen4_instdone_bit(I965_FILTER_DONE, "Filtering");
+ gen4_instdone_bit(I965_BYPASS_DONE, "Bypass FIFO");
+ gen4_instdone_bit(I965_PS_DONE, "Pixel shader");
+ gen4_instdone_bit(I965_CC_DONE, "Color calculator");
+ gen4_instdone_bit(I965_MAP_FILTER_DONE, "Map filter");
+ gen4_instdone_bit(I965_MAP_L2_IDLE, "Map L2");
+ gen4_instdone_bit(I965_MA_ROW_0_DONE, "Message Arbiter row 0");
+ gen4_instdone_bit(I965_MA_ROW_1_DONE, "Message Arbiter row 1");
+ gen4_instdone_bit(I965_IC_ROW_0_DONE, "Instruction cache row 0");
+ gen4_instdone_bit(I965_IC_ROW_1_DONE, "Instruction cache row 1");
+ gen4_instdone_bit(I965_CP_DONE, "Command Processor");
+
+ if (IS_G4X(dev)) {
+ init_g4x_instdone1();
+ } else {
+ init_g965_instdone1();
+ }
+ } else if (IS_GEN3(dev)) {
+ gen3_instdone_bit(IDCT_DONE, "IDCT");
+ gen3_instdone_bit(IQ_DONE, "IQ");
+ gen3_instdone_bit(PR_DONE, "PR");
+ gen3_instdone_bit(VLD_DONE, "VLD");
+ gen3_instdone_bit(IP_DONE, "Instruction parser");
+ gen3_instdone_bit(FBC_DONE, "Framebuffer Compression");
+ gen3_instdone_bit(BINNER_DONE, "Binner");
+ gen3_instdone_bit(SF_DONE, "Strips and fans");
+ gen3_instdone_bit(SE_DONE, "Setup engine");
+ gen3_instdone_bit(WM_DONE, "Windowizer");
+ gen3_instdone_bit(IZ_DONE, "Intermediate Z");
+ gen3_instdone_bit(PERSPECTIVE_INTERP_DONE, "Perspective interpolation");
+ gen3_instdone_bit(DISPATCHER_DONE, "Dispatcher");
+ gen3_instdone_bit(PROJECTION_DONE, "Projection and LOD");
+ gen3_instdone_bit(DEPENDENT_ADDRESS_DONE, "Dependent address calculation");
+ gen3_instdone_bit(TEXTURE_FETCH_DONE, "Texture fetch");
+ gen3_instdone_bit(TEXTURE_DECOMPRESS_DONE, "Texture decompression");
+ gen3_instdone_bit(SAMPLER_CACHE_DONE, "Sampler Cache");
+ gen3_instdone_bit(FILTER_DONE, "Filtering");
+ gen3_instdone_bit(BYPASS_FIFO_DONE, "Bypass FIFO");
+ gen3_instdone_bit(PS_DONE, "Pixel shader");
+ gen3_instdone_bit(CC_DONE, "Color calculator");
+ gen3_instdone_bit(MAP_FILTER_DONE, "Map filter");
+ gen3_instdone_bit(MAP_L2_IDLE, "Map L2");
+ } else {
+ gen3_instdone_bit(I830_GMBUS_DONE, "GMBUS");
+ gen3_instdone_bit(I830_FBC_DONE, "FBC");
+ gen3_instdone_bit(I830_BINNER_DONE, "BINNER");
+ gen3_instdone_bit(I830_MPEG_DONE, "MPEG");
+ gen3_instdone_bit(I830_MECO_DONE, "MECO");
+ gen3_instdone_bit(I830_MCD_DONE, "MCD");
+ gen3_instdone_bit(I830_MCSTP_DONE, "MCSTP");
+ gen3_instdone_bit(I830_CC_DONE, "CC");
+ gen3_instdone_bit(I830_DG_DONE, "DG");
+ gen3_instdone_bit(I830_DCMP_DONE, "DCMP");
+ gen3_instdone_bit(I830_FTCH_DONE, "FTCH");
+ gen3_instdone_bit(I830_IT_DONE, "IT");
+ gen3_instdone_bit(I830_MG_DONE, "MG");
+ gen3_instdone_bit(I830_MEC_DONE, "MEC");
+ gen3_instdone_bit(I830_PC_DONE, "PC");
+ gen3_instdone_bit(I830_QCC_DONE, "QCC");
+ gen3_instdone_bit(I830_TB_DONE, "TB");
+ gen3_instdone_bit(I830_WM_DONE, "WM");
+ gen3_instdone_bit(I830_EF_DONE, "EF");
+ gen3_instdone_bit(I830_BLITTER_DONE, "Blitter");
+ gen3_instdone_bit(I830_MAP_L2_DONE, "Map L2 cache");
+ gen3_instdone_bit(I830_SECONDARY_RING_3_DONE, "Secondary ring 3");
+ gen3_instdone_bit(I830_SECONDARY_RING_2_DONE, "Secondary ring 2");
+ gen3_instdone_bit(I830_SECONDARY_RING_1_DONE, "Secondary ring 1");
+ gen3_instdone_bit(I830_SECONDARY_RING_0_DONE, "Secondary ring 0");
+ gen3_instdone_bit(I830_PRIMARY_RING_1_DONE, "Primary ring 1");
+ gen3_instdone_bit(I830_PRIMARY_RING_0_DONE, "Primary ring 0");
+ }
+}
+#define ring_read(ring, reg) I915_READ((ring)->mmio + (reg))
+#define MAX_NUM_TOP_BITS 100
+#define SAMPLES_PER_SEC 10000
+#define SAMPLES_TO_PERCENT_RATIO (SAMPLES_PER_SEC / 100)
+
+#define HAS_STATS_REGS(dev) (IS_GEN4(dev) || \
+ IS_GEN5(dev) || \
+ IS_GEN6(dev))
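+
+/*
+ * Per-sample bookkeeping: top_bits counts, for each INSTDONE bit, how
+ * many samples saw that unit still busy; top_bits_sorted gives the
+ * report's busiest-first ordering.
+ */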
+struct top_bit {
+ struct instdone_bit *bit;
+ int count;
+} top_bits[MAX_NUM_TOP_BITS];
+struct top_bit *top_bits_sorted[MAX_NUM_TOP_BITS];
+static uint32_t instdone, instdone1;
+
+struct ring {
+ const char *name;
+ uint32_t mmio, size;
+ int head, tail;
+ uint64_t full;
+ int idle;
+};
+
+enum stats_counts {
+ IA_VERTICES,
+ IA_PRIMITIVES,
+ VS_INVOCATION,
+ GS_INVOCATION,
+ GS_PRIMITIVES,
+ CL_INVOCATION,
+ CL_PRIMITIVES,
+ PS_INVOCATION,
+ PS_DEPTH,
+ STATS_COUNT
+};
+
+const uint32_t stats_regs[STATS_COUNT] = {
+ IA_VERTICES_COUNT_QW,
+ IA_PRIMITIVES_COUNT_QW,
+ VS_INVOCATION_COUNT_QW,
+ GS_INVOCATION_COUNT_QW,
+ GS_PRIMITIVES_COUNT_QW,
+ CL_INVOCATION_COUNT_QW,
+ CL_PRIMITIVES_COUNT_QW,
+ PS_INVOCATION_COUNT_QW,
+ PS_DEPTH_COUNT_QW,
+};
+
+const char *stats_reg_names[STATS_COUNT] = {
+ "vert fetch",
+ "prim fetch",
+ "VS invocations",
+ "GS invocations",
+ "GS prims",
+ "CL invocations",
+ "CL prims",
+ "PS invocations",
+ "PS depth pass",
+};
+
+uint64_t stats[STATS_COUNT];
+uint64_t last_stats[STATS_COUNT];
+
+static unsigned long
+gettime(void)
+{
+ struct timeval t;
+ do_gettimeofday(&t);
+ return (t.tv_usec + (t.tv_sec * 1000000));
+}
+
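+/* Comparator for the busiest-first ordering: higher counts sort first. */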
+static int
+top_bits_sort(const void *a, const void *b)
+{
+ struct top_bit * const *bit_a = a;
+ struct top_bit * const *bit_b = b;
+ int a_count = (*bit_a)->count;
+ int b_count = (*bit_b)->count;
+
+ if (a_count < b_count)
+ return 1;
+ else if (a_count == b_count)
+ return 0;
+ else
+ return -1;
+}
+
+static void
+update_idle_bit(struct top_bit *top_bit)
+{
+ uint32_t reg_val;
+
+ if (top_bit->bit->reg == INST_DONE_1)
+ reg_val = instdone1;
+ else
+ reg_val = instdone;
+
+ if ((reg_val & top_bit->bit->bit) == 0)
+ top_bit->count++;
+}
+
+static void
+print_clock(char *name, int clock)
+{
+ if (clock == -1)
+ DRM_ERROR("%s clock: unknown", name);
+ else
+ DRM_ERROR("%s clock: %d MHz", name, clock);
+}
+
+static void
+print_clock_info(struct drm_device *dev)
+{
+ uint16_t gcfgc;
+
+ if (IS_GM45(dev)) {
+ int core_clock = -1;
+
+ (void) pci_read_config_word(dev->pdev, I915_GCFGC, &gcfgc);
+
+ switch (gcfgc & 0xf) {
+ case 8:
+ core_clock = 266;
+ break;
+ case 9:
+ core_clock = 320;
+ break;
+ case 11:
+ core_clock = 400;
+ break;
+ case 13:
+ core_clock = 533;
+ break;
+ }
+ print_clock("core", core_clock);
+ } else if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+ int render_clock = -1, sampler_clock = -1;
+
+ (void) pci_read_config_word(dev->pdev, I915_GCFGC, &gcfgc);
+
+ switch (gcfgc & 0xf) {
+ case 2:
+ render_clock = 250; sampler_clock = 267;
+ break;
+ case 3:
+ render_clock = 320; sampler_clock = 333;
+ break;
+ case 4:
+ render_clock = 400; sampler_clock = 444;
+ break;
+ case 5:
+ render_clock = 500; sampler_clock = 533;
+ break;
+ }
+
+ print_clock("render", render_clock);
+ print_clock("sampler", sampler_clock);
+ } else if (IS_I945GM(dev) && IS_MOBILE(dev)) {
+ int render_clock = -1, display_clock = -1;
+
+ (void) pci_read_config_word(dev->pdev, I915_GCFGC, &gcfgc);
+
+ switch (gcfgc & 0x7) {
+ case 0:
+ render_clock = 166;
+ break;
+ case 1:
+ render_clock = 200;
+ break;
+ case 3:
+ render_clock = 250;
+ break;
+ case 5:
+ render_clock = 400;
+ break;
+ }
+
+ switch (gcfgc & 0x70) {
+ case 0:
+ display_clock = 200;
+ break;
+ case 4:
+ display_clock = 320;
+ break;
+ }
+ if (gcfgc & (1 << 7))
+ display_clock = 133;
+
+ print_clock("render", render_clock);
+ print_clock("display", display_clock);
+ } else if (IS_I915GM(dev) && IS_MOBILE(dev)) {
+ int render_clock = -1, display_clock = -1;
+
+ (void) pci_read_config_word(dev->pdev, I915_GCFGC, &gcfgc);
+
+ switch (gcfgc & 0x7) {
+ case 0:
+ render_clock = 160;
+ break;
+ case 1:
+ render_clock = 190;
+ break;
+ case 4:
+ render_clock = 333;
+ break;
+ }
+ if (gcfgc & (1 << 13))
+ render_clock = 133;
+
+ switch (gcfgc & 0x70) {
+ case 0:
+ display_clock = 190;
+ break;
+ case 4:
+ display_clock = 333;
+ break;
+ }
+ if (gcfgc & (1 << 7))
+ display_clock = 133;
+
+ print_clock("render", render_clock);
+ print_clock("display", display_clock);
+ }
+}
+
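+/*
+ * Gen6 keeps the render register range in a power well that may be
+ * asleep; hold FORCEWAKE around the MMIO reads below so the sampled
+ * values are real rather than stale.
+ */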
+static void gen6_force_wake_get(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int count;
+
+ if (!IS_GEN6(dev))
+ return;
+
+ /* This will probably have undesirable side-effects upon the system. */
+ count = 0;
+ while (count++ < 50 && (I915_READ(FORCEWAKE_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE(FORCEWAKE, 1);
+ POSTING_READ(FORCEWAKE);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ(FORCEWAKE_ACK) & 1) == 0)
+ udelay(10);
+}
+
+static void gen6_force_wake_put(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!IS_GEN6(dev))
+ return;
+
+ I915_WRITE(FORCEWAKE, 0);
+ POSTING_READ(FORCEWAKE);
+}
+
+static void ring_init(struct drm_device *dev, struct ring *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gen6_force_wake_get(dev);
+ ring->size = (((ring_read(ring, _RING_LEN) & RING_NR_PAGES) >> 12) + 1) * 4096;
+ gen6_force_wake_put(dev);
+}
+
+static void ring_reset(struct ring *ring)
+{
+ ring->idle = ring->full = 0;
+}
+
+static void ring_sample(struct drm_device *dev, struct ring *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int full;
+
+ if (!ring->size)
+ return;
+
+ gen6_force_wake_get(dev);
+ ring->head = ring_read(ring, _RING_HEAD) & HEAD_ADDR;
+ ring->tail = ring_read(ring, _RING_TAIL) & TAIL_ADDR;
+ gen6_force_wake_put(dev);
+
+ if (ring->tail == ring->head)
+ ring->idle++;
+
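+ /*
+ * Occupancy is tail - head, corrected for wrap; accumulate it so
+ * ring_print() can report an average over the sampling window.
+ */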
+ full = ring->tail - ring->head;
+ if (full < 0)
+ full += ring->size;
+ ring->full += full;
+}
+
+static void ring_print(struct ring *ring, unsigned long samples_per_sec)
+{
+ int percent;
+
+ if (!ring->size)
+ return;
+
+ /* Percentage of samples in which the ring was found busy. */
+ percent = 100 - (int)((ring->idle * 100) / samples_per_sec);
+
+ DRM_ERROR("%25s busy: %3d%%: ", ring->name, percent);
+ DRM_ERROR("%24s space: %d/%d (%d%%)",
+ ring->name,
+ (int)(ring->full / samples_per_sec),
+ ring->size,
+ (int)((ring->full * 100 / samples_per_sec) / ring->size));
+}
+
+
+void i915_gpu_top(struct drm_device *dev)
+{
+ struct ring render_ring = {
+ .name = "render",
+ .mmio = 0x2030,
+ }, bsd_ring = {
+ .name = "bitstream",
+ .mmio = 0x4030,
+ }, bsd6_ring = {
+ .name = "bitstream",
+ .mmio = 0x12030,
+ }, blt_ring = {
+ .name = "blitter",
+ .mmio = 0x22030,
+ };
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t samples_per_sec = SAMPLES_PER_SEC;
+ struct timeval elapsed_time = {0, 0};
+ int i, j, k;
+ struct timeval t1, ti, tf, t2;
+ uint64_t def_sleep;
+ uint64_t last_samples_per_sec;
+ int percent;
+
+ init_instdone_definitions(dev);
+
+ for (i = 0; i < num_instdone_bits; i++) {
+ top_bits[i].bit = &instdone_bits[i];
+ top_bits[i].count = 0;
+ top_bits_sorted[i] = &top_bits[i];
+ }
+
+ ring_init(dev, &render_ring);
+ if (IS_GEN4(dev) || IS_GEN5(dev))
+ ring_init(dev, &bsd_ring);
+ if (IS_GEN6(dev)) {
+ ring_init(dev, &bsd6_ring);
+ ring_init(dev, &blt_ring);
+ }
+
+ /* Initialize GPU stats */
+ if (HAS_STATS_REGS(dev)) {
+ for (i = 0; i < STATS_COUNT; i++) {
+ uint32_t stats_high, stats_low, stats_high_2;
+
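+ /*
+ * The 64-bit counters are read as two 32-bit MMIO accesses;
+ * re-read the high word until it is stable so a carry between
+ * the low and high reads cannot tear the value.
+ */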
+ do {
+ stats_high = I915_READ(stats_regs[i] + 4);
+ stats_low = I915_READ(stats_regs[i]);
+ stats_high_2 = I915_READ(stats_regs[i] + 4);
+ } while (stats_high != stats_high_2);
+
+ last_stats[i] = (uint64_t)stats_high << 32 |
+ stats_low;
+ }
+ }
+ /* Main sampling loop: record and report repeatedly. */
+ for (k = 0; k < 200000; k++) {
+ def_sleep = (1000000 / samples_per_sec);
+ last_samples_per_sec = samples_per_sec;
+
+ do_gettimeofday(&t1);
+
+ ring_reset(&render_ring);
+ ring_reset(&bsd_ring);
+ ring_reset(&bsd6_ring);
+ ring_reset(&blt_ring);
+
+ for (i = 0; i < samples_per_sec; i++) {
+ long interval;
+ long t_diff;
+ do_gettimeofday(&ti);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ instdone = I915_READ(INST_DONE_I965);
+ instdone1 = I915_READ(INST_DONE_1);
+ } else
+ instdone = I915_READ(INST_DONE);
+
+ for (j = 0; j < num_instdone_bits; j++)
+ update_idle_bit(&top_bits[j]);
+
+ ring_sample(dev, &render_ring);
+ ring_sample(dev, &bsd_ring);
+ ring_sample(dev, &bsd6_ring);
+ ring_sample(dev, &blt_ring);
+
+ do_gettimeofday(&tf);
+ t_diff = (tf.tv_sec - t1.tv_sec) * 1000000 + tf.tv_usec - t1.tv_usec;
+ if (t_diff >= 1000000) {
+ /* We are out of sync, bail out */
+ last_samples_per_sec = i+1;
+ break;
+ }
+ t_diff = (tf.tv_sec - ti.tv_sec) * 1000000 + tf.tv_usec - ti.tv_usec;
+ interval = def_sleep - t_diff;
+ if (interval > 0)
+ udelay(interval);
+ }
+
+ if (HAS_STATS_REGS(dev)) {
+ for (i = 0; i < STATS_COUNT; i++) {
+ uint32_t stats_high, stats_low, stats_high_2;
+
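+ /* Same high/low/high re-read as above to avoid a torn 64-bit value. */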
+ do {
+ stats_high = I915_READ(stats_regs[i] + 4);
+ stats_low = I915_READ(stats_regs[i]);
+ stats_high_2 = I915_READ(stats_regs[i] + 4);
+ } while (stats_high != stats_high_2);
+
+ stats[i] = (uint64_t)stats_high << 32 |
+ stats_low;
+ }
+ }
+
+ /*
+ * Sort by busy count, descending. The userspace qsort() call is
+ * replaced here by a simple in-place exchange sort.
+ */
+ struct top_bit tmp_top;
+ for (i = 0; i < num_instdone_bits; i++) {
+ for (j = i+1; j < num_instdone_bits; j++) {
+ if (top_bits_sort(&top_bits_sorted[i], &top_bits_sorted[j]) == 1) {
+ tmp_top.bit = top_bits_sorted[i]->bit;
+ tmp_top.count = top_bits_sorted[i]->count;
+
+ top_bits_sorted[i]->bit = top_bits_sorted[j]->bit;
+ top_bits_sorted[i]->count = top_bits_sorted[j]->count;
+
+ top_bits_sorted[j]->bit = tmp_top.bit;
+ top_bits_sorted[j]->count = tmp_top.count;
+ }
+ }
+ }
+ /* Print the report, showing at most the 15 busiest bits. */
+ int max_lines = 15;
+ if (max_lines > num_instdone_bits)
+ max_lines = num_instdone_bits;
+
+ do_gettimeofday(&t2);
+ timevalsub(&t2, &t1);
+ timevaladd(&elapsed_time, &t2);
+ long ttt = t2.tv_sec * 1000000 + t2.tv_usec;
+ DRM_ERROR("caculate time %ld usec!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", ttt);
+
+ print_clock_info(dev);
+
+ ring_print(&render_ring, last_samples_per_sec);
+ ring_print(&bsd_ring, last_samples_per_sec);
+ ring_print(&bsd6_ring, last_samples_per_sec);
+ ring_print(&blt_ring, last_samples_per_sec);
+
+ for (i = 0; i < max_lines; i++) {
+ if (top_bits_sorted[i]->count > 0) {
+ percent = (top_bits_sorted[i]->count * 100) /
+ last_samples_per_sec;
+ DRM_ERROR("%30s: %3d%%: ",
+ top_bits_sorted[i]->bit->name,
+ percent);
+ } else {
+ DRM_ERROR(" ");
+ }
+
+ if (i < STATS_COUNT && HAS_STATS_REGS(dev)) {
+ DRM_ERROR("%13s: %lu (%ld/sec)",
+ stats_reg_names[i],
+ stats[i],
+ stats[i] - last_stats[i]);
+ last_stats[i] = stats[i];
+ } else {
+ if (!top_bits_sorted[i]->count)
+ break;
+ }
+ }
+
+ for (i = 0; i < num_instdone_bits; i++) {
+ top_bits_sorted[i]->count = 0;
+
+ if (i < STATS_COUNT)
+ last_stats[i] = stats[i];
+ }
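+ /* Pause roughly four seconds between reports. */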
+ udelay(4000000);
+ }
+}
+
+void gpu_top_handler(void *data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct timeval t;
+ do_gettimeofday(&t);
+ DRM_ERROR("in %ld", t.tv_sec);
+ i915_gpu_top(dev);
+/*
+ mod_timer(&dev_priv->gpu_top_timer,
+ jiffies + msecs_to_jiffies(5000));
+*/
+}
+
+void ring_dump(struct drm_device *dev, struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (ring) {
+ /* dump ring info */
+ unsigned int *virt;
+ u32 tail, head;
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+
+ DRM_ERROR("Dump %s ring", ring->name);
+ DRM_ERROR("HEAD 0x%x TAIL 0x%x", head, tail);
+ DRM_ERROR("seq %d", ring->get_seqno(ring, true));
+
+ if (head == tail)
+ return;
+
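+ /* Dump 180 dwords before the current head and 20 from the head on. */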
+ for (int i = 0; i < 200; i++) {
+ virt = (unsigned int *)((intptr_t)ring->virtual_start + head + (i-180)*4);
+ DRM_ERROR("%s[0x%x]: 0x%x", ring->name, head + (i-180)*4, virt[0]);
+ }
+ }
+}
+
+void gtt_dump(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (gpu_panic_on_hang) {
+ dev->gtt_dump = kmem_zalloc(gtt_total_entries(dev_priv->gtt) * sizeof (uint32_t), KM_NOSLEEP);
+ ret = drm_agp_rw_gtt(dev, gtt_total_entries(dev_priv->gtt),
+ 0, (void *) dev->gtt_dump, 0);
+ if (ret)
+ DRM_ERROR("failed to dump whole gtt");
+ panic("gpu hang");
+ }
+}
+
+void register_dump(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_ERROR("PGTBL_ER: 0x%lx", I915_READ(PGTBL_ER));
+ DRM_ERROR("INSTPM: 0x%lx", I915_READ(INSTPM));
+ DRM_ERROR("EIR: 0x%lx", I915_READ(EIR));
+ DRM_ERROR("ERROR_GEN6: 0x%lx", I915_READ(ERROR_GEN6));
+
+ DRM_ERROR("Blitter command stream:");
+ DRM_ERROR(" IPEIR: 0x%08x",
+ I915_READ(0x22064));
+ DRM_ERROR(" IPEHR: 0x%08x",
+ I915_READ(0x22068));
+ DRM_ERROR(" INSTDONE: 0x%08x",
+ I915_READ(0x2206C));
+ DRM_ERROR(" ACTHD: 0x%08x",
+ I915_READ(0x22074));
+ DRM_ERROR("Render command stream:");
+ DRM_ERROR(" IPEIR: 0x%08x",
+ I915_READ(IPEIR_I965));
+ DRM_ERROR(" IPEHR: 0x%08x",
+ I915_READ(IPEHR_I965));
+ DRM_ERROR(" INSTDONE: 0x%08x",
+ I915_READ(INSTDONE_I965));
+ DRM_ERROR(" INSTPS: 0x%08x",
+ I915_READ(INSTPS));
+ DRM_ERROR(" INSTDONE1: 0x%08x",
+ I915_READ(INSTDONE1));
+ DRM_ERROR(" ACTHD: 0x%08x",
+ I915_READ(ACTHD_I965));
+ DRM_ERROR(" DMA_FADD_P: 0x%08x",
+ I915_READ(0x2078));
+
+ DRM_ERROR("Graphics Engine Fault 0x%lx",
+ I915_READ(0x04094));
+ DRM_ERROR("Media Engine Fault 0x%lx",
+ I915_READ(0x04194));
+ DRM_ERROR("Blitter Engine Fault 0x%lx",
+ I915_READ(0x04294));
+}
diff --git a/usr/src/uts/intel/io/i915/i915_gem_evict.c b/usr/src/uts/intel/io/i915/i915_gem_evict.c
new file mode 100644
index 0000000..9e3ae32
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_gem_evict.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2008-2010, 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uuk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+
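+/*
+ * Add an unpinned object to the active drm_mm eviction scan; returns
+ * true once the scan has found enough contiguous space to satisfy the
+ * pending request.
+ */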
+static bool
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+{
+ if (obj->pin_count)
+ return false;
+
+ list_add(&obj->exec_list, unwind, (caddr_t)obj);
+ return drm_mm_scan_add_block(obj->gtt_space);
+}
+
+int
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, unsigned cache_level,
+ bool mappable, bool nonblocking)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head eviction_list, unwind_list;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+
+ /*
+ * The goal is to evict objects and amalgamate space in LRU order.
+ * The oldest idle objects reside on the inactive list, which is in
+ * retirement order. The next objects to retire are those on the (per
+ * ring) active list that do not have an outstanding flush. Once the
+ * hardware reports completion (the seqno is updated after the
+ * batchbuffer has been finished) the clean buffer objects would
+ * be retired to the inactive list. Any dirty objects would be added
+ * to the tail of the flushing list. So after processing the clean
+ * active objects we need to emit a MI_FLUSH to retire the flushing
+ * list, hence the retirement order of the flushing list is in
+ * advance of the dirty objects on the active lists.
+ *
+ * The retirement sequence is thus:
+ * 1. Inactive objects (already retired)
+ * 2. Clean active objects
+ * 3. Flushing list
+ * 4. Dirty active objects.
+ *
+ * On each list, the oldest objects lie at the HEAD with the freshest
+ * object on the TAIL.
+ */
+
+ INIT_LIST_HEAD(&unwind_list);
+ if (mappable)
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, cache_level,
+ 0, dev_priv->gtt.mappable_end);
+ else
+ drm_mm_init_scan(&dev_priv->mm.gtt_space,
+ min_size, alignment, cache_level);
+
+ /* First see if there is a large enough contiguous idle region... */
+ list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.inactive_list, mm_list) {
+ if (mark_free(obj, &unwind_list))
+ goto found;
+ }
+
+ if (nonblocking)
+ goto none;
+
+ /* Now merge in the soon-to-be-expired objects... */
+ list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.active_list, mm_list) {
+ if (mark_free(obj, &unwind_list))
+ goto found;
+ }
+
+none:
+ /* Nothing found, clean up and bail out! */
+ while (!list_empty(&unwind_list)) {
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+
+ ret = drm_mm_scan_remove_block(obj->gtt_space);
+ BUG_ON(ret);
+
+ list_del_init(&obj->exec_list);
+ }
+
+ /* We expect the caller to unpin, evict all and try again, or give up.
+ * So calling i915_gem_evict_everything() is unnecessary.
+ */
+ return -ENOSPC;
+
+found:
+ /* drm_mm doesn't allow any other operations while scanning;
+ * therefore, store the objects to be evicted on a temporary
+ * list. */
+ INIT_LIST_HEAD(&eviction_list);
+ while (!list_empty(&unwind_list)) {
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ list_move(&obj->exec_list, &eviction_list, (caddr_t)obj);
+ drm_gem_object_reference(&obj->base);
+ continue;
+ }
+ list_del_init(&obj->exec_list);
+ }
+
+ /* Unbinding will emit any required flushes */
+ while (!list_empty(&eviction_list)) {
+ obj = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (ret == 0)
+ ret = i915_gem_object_unbind(obj, 1);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ return ret;
+}
+
+int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj, *next;
+ bool lists_empty;
+ int ret;
+
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* The gpu_idle will flush everything in the write domain to the
+ * active list. Then we must move everything off the active list
+ * with retire requests.
+ */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+
+ /* Having flushed everything, unbind() should never raise an error */
+ list_for_each_entry_safe(obj, next, struct drm_i915_gem_object,
+ &dev_priv->mm.inactive_list, mm_list)
+ /* LINTED */
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj, true));
+
+ return 0;
+}
diff --git a/usr/src/uts/intel/io/i915/i915_gem_execbuffer.c b/usr/src/uts/intel/io/i915/i915_gem_execbuffer.c
new file mode 100644
index 0000000..b32278b
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1192 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright © 2008,2010, 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+struct eb_objects {
+ int and;
+ struct drm_file *file_priv;
+};
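+
+/*
+ * Note: the execbuffer object hash is stubbed out in this port;
+ * eb_get_object() always returns NULL, and relocation processing
+ * resolves targets through drm_gem_object_lookup() instead.
+ */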
+
+static struct eb_objects *
+eb_create(int size, struct drm_file *file_priv)
+{
+ struct eb_objects *eb;
+ eb = kzalloc(sizeof(struct eb_objects), GFP_KERNEL);
+ if (eb == NULL)
+ return eb;
+ eb->file_priv = file_priv;
+ eb->and = size;
+ return eb;
+}
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+}
+
+static void
+eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+{
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+ return NULL;
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+ kfree(eb, sizeof(struct eb_objects));
+}
+
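+/*
+ * Prefer patching relocations through the CPU when a GTT write would
+ * be slower or impossible: the object is already in the CPU write
+ * domain, is not mappable and fenceable, or is cached.
+ */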
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+ return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ !obj->map_and_fenceable ||
+ obj->cache_level != I915_CACHE_NONE);
+}
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_i915_obj;
+ uint32_t reloc_val, reloc_offset;
+ uint32_t target_offset;
+ uint32_t *reloc_entry;
+ int ret = -EINVAL;
+
+ target_obj = drm_gem_object_lookup(dev, eb->file_priv,
+ reloc->target_handle);
+ if (target_obj == NULL)
+ return -ENOENT;
+
+ target_i915_obj = to_intel_bo(target_obj);
+ target_offset = target_i915_obj->gtt_offset;
+
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non-secure batchbuffers. */
+ if (IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping) {
+ i915_gem_gtt_bind_object(target_i915_obj,
+ target_i915_obj->cache_level);
+ }
+
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_DEBUG("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (unlikely((reloc->write_domain | reloc->read_domains)
+ & ~I915_GEM_GPU_DOMAINS)) {
+ DRM_DEBUG("reloc with read/write non-GPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ goto out;
+
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->base.size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ goto err;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ goto err;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+
+ /* Map the page containing the relocation we're going to
+ * perform.
+ */
+ int reloc_base = (reloc->offset & ~(PAGE_SIZE-1));
+ reloc_offset = reloc->offset & (PAGE_SIZE-1);
+ reloc_entry = (uint32_t *)(uintptr_t)(obj->page_list[reloc_base/PAGE_SIZE] + reloc_offset);
+ reloc_val = target_offset + reloc->delta;
+
+ *reloc_entry = reloc_val;
+
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
+
+out:
+ ret = 0;
+err:
+ drm_gem_object_unreference(target_obj);
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb)
+{
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+ struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ int remain, ret;
+
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+
+ remain = entry->relocation_count;
+ while (remain) {
+ struct drm_i915_gem_relocation_entry *r = stack_reloc;
+ int count = remain;
+ if (count > ARRAY_SIZE(stack_reloc))
+ count = ARRAY_SIZE(stack_reloc);
+ remain -= count;
+
+ if (DRM_COPY_FROM_USER(r, user_relocs, count*sizeof(r[0])))
+ return -EFAULT;
+
+ do {
+ u64 offset = r->presumed_offset;
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ if (ret)
+ return ret;
+
+ if (r->presumed_offset != offset &&
+ DRM_COPY_TO_USER(&user_relocs->presumed_offset,
+ &r->presumed_offset,
+ sizeof(r->presumed_offset))) {
+ return -EFAULT;
+ }
+
+ user_relocs++;
+ r++;
+ } while (--count);
+ }
+
+ return 0;
+#undef N_RELOC
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct eb_objects *eb,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+
+ list_for_each_entry(obj, struct drm_i915_gem_object, objects, exec_list) {
+ ret = i915_gem_execbuffer_relocate_object(obj, eb);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+#define __EXEC_OBJECT_HAS_PIN (1UL<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1UL<<30)
+
+static int
+need_reloc_mappable(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(obj);
+}
+
+static int
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ bool *need_reloc)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ bool need_fence, need_mappable;
+ int ret;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ /* workaround for GEN5 for gpu hang with ugnx */
+ if (IS_GEN5(ring->dev))
+ need_fence = obj->tiling_mode != I915_TILING_NONE;
+
+ need_mappable = need_fence || need_reloc_mappable(obj);
+
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
+ if (ret)
+ return ret;
+
+ entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
+ if (has_fenced_gpu_access) {
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ return ret;
+
+ if (i915_gem_object_pin_fence(obj))
+ entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+
+ obj->pending_fenced_gpu_access = true;
+ }
+ }
+
+ /* Ensure ppgtt mapping exists if needed */
+ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, obj->cache_level);
+
+ obj->has_aliasing_ppgtt_mapping = 1;
+ }
+
+ if (entry->offset != obj->gtt_offset) {
+ entry->offset = obj->gtt_offset;
+ *need_reloc = true;
+ }
+
+ if (entry->flags & EXEC_OBJECT_WRITE) {
+ obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
+ obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+ }
+
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
+ !obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ return 0;
+}
+
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ return;
+
+ entry = obj->exec_entry;
+
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+ i915_gem_object_unpin_fence(obj);
+
+ if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+ i915_gem_object_unpin(obj);
+
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct list_head *objects,
+ bool *need_relocs)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head ordered_objects;
+ struct list_head *tmp;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ struct drm_i915_gem_object *batch_obj;
+ int retry;
+ batch_obj = list_entry(objects->prev,
+ struct drm_i915_gem_object,
+ exec_list);
+
+ INIT_LIST_HEAD(&ordered_objects);
+ while (!list_empty(objects)) {
+ struct drm_i915_gem_exec_object2 *entry;
+ bool need_fence, need_mappable;
+
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ entry = obj->exec_entry;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ /* workaround for GEN5 for gpu hang with ugnx */
+ if (IS_GEN5(ring->dev))
+ need_fence = obj->tiling_mode != I915_TILING_NONE;
+ need_mappable = need_fence || need_reloc_mappable(obj);
+
+ if (need_mappable)
+ list_move(&obj->exec_list, &ordered_objects, (caddr_t)obj);
+ else
+ list_move_tail(&obj->exec_list, &ordered_objects, (caddr_t)obj);
+
+ obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
+ obj->base.pending_write_domain = 0;
+ obj->pending_fenced_gpu_access = false;
+
+ if (IS_GEN5(ring->dev) && (batch_obj != obj) && obj->gtt_offset) {
+ int err;
+ if (obj->tiling_mode != I915_TILING_NONE)
+ err = i915_gem_object_get_fence(obj);
+ else
+ err = i915_gem_object_put_fence(obj);
+ if (err)
+ DRM_ERROR("failed to get obj 0x%p fence %d", obj, err);
+ obj->pending_fenced_gpu_access = true;
+ }
+ }
+
+ tmp = objects->next;
+ list_splice(&ordered_objects, objects, tmp);
+
+ /* Attempt to pin all of the buffers into the GTT.
+ * This is done in 3 phases:
+ *
+ * 1a. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 1b. Increment pin count for already bound objects.
+ * 2. Bind new objects.
+ * 3. Decrement pin count.
+ *
+ * This avoids unnecessary unbinding of later objects in order to make
+ * room for the earlier objects *unless* we need to defragment.
+ */
+ retry = 0;
+ do {
+ int ret = 0;
+
+ /* Unbind any ill-fitting objects or pin. */
+ list_for_each_entry(obj, struct drm_i915_gem_object, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool need_fence, need_mappable;
+ if (!obj->gtt_space)
+ continue;
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ /* workaround for GEN5 for gpu hang with ugnx */
+ if (IS_GEN5(ring->dev))
+ need_fence = obj->tiling_mode != I915_TILING_NONE;
+ need_mappable = need_fence || need_reloc_mappable(obj);
+
+ if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ (need_mappable && !obj->map_and_fenceable))
+ ret = i915_gem_object_unbind(obj, 1);
+ else
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ if (ret)
+ goto err;
+ }
+
+ /* Bind fresh objects */
+ list_for_each_entry(obj, struct drm_i915_gem_object, objects, exec_list) {
+ if (obj->gtt_space)
+ continue;
+
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ if (ret)
+ goto err;
+ }
+
+err: /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, struct drm_i915_gem_object, objects, exec_list)
+ i915_gem_execbuffer_unreserve_object(obj);
+
+ if (ret != -ENOSPC || retry++)
+ return ret;
+
+ ret = i915_gem_evict_everything(ring->dev);
+ if (ret)
+ return ret;
+ } while (1);
+/* LINTED */
+}
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct list_head *objects,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ struct drm_i915_gem_object *obj;
+ bool need_relocs;
+ int *reloc_offset;
+ int i, total, ret;
+
+ /* We may process another execbuffer during the unlock... */
+ while (!list_empty(objects)) {
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec[i].relocation_count;
+
+ reloc_offset = drm_calloc(count, sizeof(*reloc_offset), DRM_MEM_DRIVER);
+ reloc = drm_calloc(total, sizeof(*reloc), DRM_MEM_DRIVER);
+ if (reloc == NULL || reloc_offset == NULL) {
+ drm_free(reloc, total * sizeof(*reloc), DRM_MEM_DRIVER);
+ drm_free(reloc_offset, count * sizeof(*reloc_offset), DRM_MEM_DRIVER);
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec[i].relocation_count * sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ reloc_offset[i] = total;
+ total += exec[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
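+ /* The caller expects struct_mutex held even on failure. */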
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ /* reacquire the objects */
+ eb_reset(eb);
+ for (i = 0; i < count; i++) {
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (&obj->base == NULL) {
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, objects, (caddr_t)obj);
+ obj->exec_handle = exec[i].handle;
+ obj->exec_entry = &exec[i];
+ eb_add_object(eb, obj);
+ }
+
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+
+ ret = i915_gem_execbuffer_reserve(ring, file, objects, &need_relocs);
+ if (ret)
+ goto err;
+
+ list_for_each_entry(obj, struct drm_i915_gem_object, objects, exec_list) {
+ int offset = obj->exec_entry - exec;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+ reloc + reloc_offset[offset]);
+ if (ret)
+ goto err;
+ }
+
+ /* Leave the user relocations as are, this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free(reloc, total * sizeof(*reloc), DRM_MEM_DRIVER);
+ drm_free(reloc_offset, count * sizeof(*reloc_offset), DRM_MEM_DRIVER);
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ uint32_t flush_domains = 0;
+ int ret;
+
+ list_for_each_entry(obj, struct drm_i915_gem_object, objects, exec_list) {
+ ret = i915_gem_object_sync(obj, ring);
+ if (ret)
+ return ret;
+
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ flush_domains |= obj->base.write_domain;
+ }
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ i915_gem_chipset_flush(ring->dev);
+
+ if (flush_domains & I915_GEM_DOMAIN_GTT)
+ membar_producer();
+
+ /* Unconditionally invalidate gpu caches and ensure that we do flush
+ * any residual writes from the previous batch.
+ */
+ return intel_ring_invalidate_all_caches(ring);
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+ return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ int i;
+ int relocs_total = 0;
+ int relocs_max = INT_MAX /
+ sizeof(struct drm_i915_gem_relocation_entry);
+
+ for (i = 0; i < count; i++) {
+#if 0 /* Should match the if a few lines below */
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+#endif
+
+ /*
+ * First check for malicious input causing overflow in
+ * the worst case where we need to allocate the entire
+ * relocation tree as a single array.
+ */
+ if (exec[i].relocation_count> relocs_max - relocs_total)
+ return -EINVAL;
+ relocs_total += exec[i].relocation_count;
+#if 0
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+ if (!access_ok(VERIFY_READ, ptr, length))
+ return -EFAULT;
+
+ /* we may also need to update the presumed offsets */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ if (fault_in_pages_readable(ptr, length))
+ return -EFAULT;
+#endif
+ }
+
+ return 0;
+}
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, struct drm_i915_gem_object, objects, exec_list) {
+ obj->base.write_domain = obj->base.pending_write_domain;
+ if (obj->base.write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+ obj->base.read_domains = obj->base.pending_read_domains;
+ obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+ i915_gem_object_move_to_active(obj, ring);
+ if (obj->base.write_domain) {
+ obj->dirty = 1;
+ obj->last_write_seqno = intel_ring_get_seqno(ring);
+ if (obj->pin_count) /* check for potential scanout */
+ intel_mark_fb_busy(obj, ring);
+ }
+
+ }
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct drm_i915_gem_object *obj)
+{
+ /* Unconditionally force add_request to emit a full flush. */
+ ring->gpu_caches_dirty = true;
+
+ /* Add a breadcrumb for the completion of the batch buffer */
+ (void)__i915_add_request(ring, file, obj, NULL);
+}
+
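+/*
+ * Gen7 streamout (SOL) write offsets persist across batches; zero all
+ * four with MI_LOAD_REGISTER_IMM when userspace requests a reset.
+ */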
+static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret, i;
+
+ if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
+ return 0;
+
+ ret = intel_ring_begin(ring, 4 * 3);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(ring, 0);
+ }
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+/* LINTED */
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head objects;
+ struct eb_objects *eb;
+ struct drm_i915_gem_object *batch_obj;
+ struct drm_clip_rect *cliprects = NULL;
+ struct intel_ring_buffer *ring;
+ u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+ struct batch_info_list *node;
+ u32 exec_start, exec_len;
+ u32 mask, flags;
+ int ret, mode, i;
+ bool need_relocs;
+
+ if (!i915_gem_check_execbuffer(args)) {
+ DRM_DEBUG("execbuf with invalid offset/length\n");
+ return -EINVAL;
+ }
+
+ ret = validate_exec_list(exec, args->buffer_count);
+ if (ret)
+ return ret;
+
+ flags = 0;
+ if (args->flags & I915_EXEC_SECURE) {
+ if (!file->is_master)
+ return -EPERM;
+
+ flags |= I915_DISPATCH_SECURE;
+ }
+ if (args->flags & I915_EXEC_IS_PINNED)
+ flags |= I915_DISPATCH_PINNED;
+
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->ring[RCS];
+ break;
+ case I915_EXEC_BSD:
+ ring = &dev_priv->ring[VCS];
+ if (ctx_id != 0) {
+ DRM_DEBUG("Ring %s doesn't support contexts\n",
+ ring->name);
+ return -EPERM;
+ }
+ break;
+ case I915_EXEC_BLT:
+ ring = &dev_priv->ring[BCS];
+ if (ctx_id != 0) {
+ DRM_DEBUG("Ring %s doesn't support contexts\n",
+ ring->name);
+ return -EPERM;
+ }
+ break;
+ case I915_EXEC_VEBOX:
+ ring = &dev_priv->ring[VECS];
+ if (ctx_id != 0) {
+ DRM_DEBUG("Ring %s doesn't support contexts\n",
+ ring->name);
+ return -EPERM;
+ }
+ break;
+
+ default:
+ DRM_DEBUG("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
+ if (!intel_ring_initialized(ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ mask = I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4)
+ return -EINVAL;
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+ return -EINVAL;
+
+ /* The HW changed the meaning on this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
+ return -EINVAL;
+ }
+
+ if (args->buffer_count < 1) {
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+ DRM_DEBUG("execbuf with %u cliprects\n",
+ args->num_cliprects);
+ return -EINVAL;
+ }
+
+ cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ if (DRM_COPY_FROM_USER(cliprects,
+ (struct drm_clip_rect __user *)
+ (uintptr_t) args->cliprects_ptr,
+ sizeof(*cliprects) * args->num_cliprects)) {
+ ret = -EFAULT;
+ goto pre_mutex_err;
+ }
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_mutex_err;
+
+ if (dev_priv->mm.suspended || dev_priv->gpu_hang) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -EBUSY;
+ goto pre_mutex_err;
+ }
+
+ eb = eb_create(args->buffer_count, file);
+ if (eb == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ if (MDB_TRACK_ENABLE) {
+ node = drm_alloc(sizeof (struct batch_info_list), DRM_MEM_MAPS);
+ node->num = args->buffer_count;
+ node->obj_list = drm_alloc(node->num * sizeof(caddr_t), DRM_MEM_MAPS);
+ list_add(&node->head, &dev_priv->batch_list, (caddr_t)node);
+ }
+ /* Look up object handles */
+ INIT_LIST_HEAD(&objects);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (&obj->base == NULL) {
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, &objects, (caddr_t)obj);
+ obj->exec_handle = exec[i].handle;
+ obj->exec_entry = &exec[i];
+ eb_add_object(eb, obj);
+
+ if (MDB_TRACK_ENABLE)
+ node->obj_list[i] = (caddr_t)obj;
+ TRACE_GEM_OBJ_HISTORY(obj, "prepare emit");
+ }
+
+ /* take note of the batch buffer before we might reorder the lists */
+ batch_obj = list_entry(objects.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+
+ /* Move the objects en-masse into the GTT, evicting if necessary. */
+ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+ ret = i915_gem_execbuffer_reserve(ring, file, &objects, &need_relocs);
+ if (ret)
+ goto err;
+
+ /* The objects are in their final locations, apply the relocations. */
+ if (need_relocs)
+ ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+ &objects, eb,
+ exec,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
+ if (ret)
+ goto err;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ if (batch_obj->base.pending_write_domain) {
+ DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+ * batch" bit. Hence we need to pin secure batches into the global gtt.
+ * hsw should have this fixed, but let's be paranoid and do it
+ * unconditionally for now. */
+ if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ if (ret)
+ goto err;
+
+ ret = i915_switch_context(ring, file, ctx_id);
+ if (ret)
+ goto err;
+
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, mask << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto err;
+ }
+
+ exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto err;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ goto err;
+ }
+
+ i915_gem_execbuffer_move_to_active(&objects, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+err:
+ eb_destroy(eb);
+ while (!list_empty(&objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ TRACE_GEM_OBJ_HISTORY(obj, "finish emit");
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+
+ drm_free(cliprects, args->num_cliprects * sizeof(*cliprects),
+ DRM_MEM_DRIVER);
+ return ret;
+}
+
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+/* LINTED */
+i915_gem_execbuffer(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+ if (args->buffer_count < 1) {
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, DRM_MEM_DRIVER);
+ exec2_list = drm_calloc(sizeof(*exec2_list), args->buffer_count, DRM_MEM_DRIVER);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free(exec_list, args->buffer_count * sizeof(*exec_list), DRM_MEM_DRIVER);
+ drm_free(exec2_list, args->buffer_count * sizeof(*exec2_list), DRM_MEM_DRIVER);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_DEBUG("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free(exec_list, args->buffer_count * sizeof(*exec_list), DRM_MEM_DRIVER);
+ drm_free(exec2_list, args->buffer_count * sizeof(*exec2_list), DRM_MEM_DRIVER);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (INTEL_INFO(dev)->gen < 4)
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = I915_EXEC_RENDER;
+ i915_execbuffer2_set_context_id(exec2, 0);
+
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free(exec_list, args->buffer_count * sizeof(*exec_list), DRM_MEM_DRIVER);
+ drm_free(exec2_list, args->buffer_count * sizeof(*exec2_list), DRM_MEM_DRIVER);
+ return ret;
+}
+
+int
+/* LINTED */
+i915_gem_execbuffer2(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+ if (args->buffer_count < 1 ||
+ args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
+ DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ exec2_list = drm_calloc(sizeof(*exec2_list), args->buffer_count, DRM_MEM_DRIVER);
+ if (exec2_list == NULL) {
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_DEBUG("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free(exec2_list, args->buffer_count * sizeof(*exec2_list), DRM_MEM_DRIVER);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free(exec2_list, args->buffer_count * sizeof(*exec2_list), DRM_MEM_DRIVER);
+ return ret;
+}
diff --git a/usr/src/uts/intel/io/i915/i915_gem_gtt.c b/usr/src/uts/intel/io/i915/i915_gem_gtt.c
new file mode 100644
index 0000000..2d9a554
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_gem_gtt.c
@@ -0,0 +1,1180 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013, Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT (per-process GTT) PTE/PDE encodings */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define HSW_PTE_UNCACHED (0)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
+ uint64_t addr,
+ enum i915_cache_level level)
+{
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_LLC_MLC:
+ pte |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ BUG();
+ }
+
+ return pte;
+}
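+
+/*
+ * Worked example of the encoding above (illustrative values, not from
+ * real hardware): for a 36-bit physical address, the bits above 31 are
+ * folded into PTE bits 11:4 by GEN6_GTT_ADDR_ENCODE.
+ *
+ *   addr = 0x212345000 (bit 33 set)
+ *   addr | ((addr >> 28) & 0xff0) = 0x12345000 | 0x20 = 0x12345020
+ *   pte  = 0x12345020 | GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID = 0x12345025
+ */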
+
+#define BYT_PTE_WRITEABLE (1 << 1)
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
+
+static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
+ uint64_t addr,
+ enum i915_cache_level level)
+{
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ /* Mark the page as writeable. Other platforms don't have a
+ * setting for read-only/writable, so this matches that behavior.
+ */
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
+ uint64_t addr,
+ enum i915_cache_level level)
+{
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ if (level != I915_CACHE_NONE)
+ pte |= GEN6_PTE_CACHE_LLC;
+
+ return pte;
+}
+
+static ddi_dma_attr_t ppgt_dma_attr = {
+ DMA_ATTR_V0,
+ 0xff000U, /* dma_attr_addr_lo */
+ 0xffffffffU, /* dma_attr_addr_hi */
+ 0xffffffffU, /* dma_attr_count_max */
+ 4096, /* dma_attr_align */
+ 0x1fffU, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffU, /* dma_attr_maxxfer */
+ 0xffffffffU, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen, variable */
+ 4, /* dma_attr_granular */
+ DDI_DMA_FLAGERR, /* dma_attr_flags */
+};
+static ddi_device_acc_attr_t ppgt_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_MERGING_OK_ACC,
+ DDI_FLAGERR_ACC
+};
+
+static int
+i915_ppgtt_page_alloc(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
+ int pgcnt)
+{
+ ddi_dma_cookie_t cookie;
+ uint_t cookie_cnt;
+ pgcnt_t real_pgcnt;
+ uint32_t paddr, cookie_end;
+ int i, n;
+
+ ppgt_dma_attr.dma_attr_sgllen = (int)pgcnt;
+
+ if (ddi_dma_alloc_handle(dev->devinfo, &ppgt_dma_attr,
+ DDI_DMA_DONTWAIT, NULL, &ppgtt->dma_hdl)) {
+ DRM_ERROR("i915_ppgtt_page_alloc: "
+ "ddi_dma_alloc_handle failed");
+ goto err1;
+ }
+ if (ddi_dma_mem_alloc(ppgtt->dma_hdl, ptob(pgcnt), &ppgt_acc_attr,
+ IOMEM_DATA_UC_WR_COMBINE, DDI_DMA_DONTWAIT, NULL,
+ &ppgtt->kaddr, &ppgtt->real_size, &ppgtt->acc_hdl)) {
+ DRM_ERROR("drm_gem_object_alloc: "
+ "ddi_dma_mem_alloc failed");
+ goto err2;
+ }
+ if (ddi_dma_addr_bind_handle(ppgtt->dma_hdl, NULL,
+ ppgtt->kaddr, ppgtt->real_size, DDI_DMA_RDWR,
+ DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_cnt)
+ != DDI_DMA_MAPPED) {
+ DRM_ERROR("drm_gem_object_alloc: "
+ "ddi_dma_addr_bind_handle failed");
+ goto err3;
+ }
+
+ real_pgcnt = btopr(ppgtt->real_size);
+
+ ppgtt->pt_pages = kmem_zalloc(real_pgcnt * sizeof (pfn_t), KM_NOSLEEP);
+ if (ppgtt->pt_pages == NULL) {
+ DRM_DEBUG("pfnarray == NULL");
+ goto err4;
+ }
+
+ for (n = 0, i = 1; ; i++) {
+ for (paddr = cookie.dmac_address,
+ cookie_end = cookie.dmac_address + cookie.dmac_size;
+ paddr < cookie_end;
+ paddr += PAGESIZE) {
+ ppgtt->pt_pages[n++] = btop(paddr);
+ if (n >= real_pgcnt)
+ return (0);
+ }
+ if (i >= cookie_cnt)
+ break;
+ ddi_dma_nextcookie(ppgtt->dma_hdl, &cookie);
+ }
+
+err4:
+ (void) ddi_dma_unbind_handle(ppgtt->dma_hdl);
+err3:
+ ddi_dma_mem_free(&ppgtt->acc_hdl);
+err2:
+ ddi_dma_free_handle(&ppgtt->dma_hdl);
+err1:
+ return (-1);
+
+}
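+
+/*
+ * Sketch of what the cookie walk above produces, assuming a single
+ * 8KB allocation bound at physical address 0x10000000 (hypothetical):
+ * one cookie with dmac_address = 0x10000000 and dmac_size = 0x2000,
+ * giving pt_pages[] = { 0x10000, 0x10001 } (page frame numbers).
+ */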
+
+static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
+ gen6_gtt_pte_t *pd_addr;
+ uint32_t pt_addr;
+ uint32_t pd_entry;
+ int i;
+
+ WARN_ON(ppgtt->pd_offset & 0x3f);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ pd_addr = (gen6_gtt_pte_t *)(uintptr_t)((caddr_t)dev_priv->gtt.virtual_gtt
+ + ppgtt->pd_offset + i * sizeof(gtt_pte_t));
+
+ pt_addr = ppgtt->pt_pages[i] << PAGE_SHIFT;
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ ddi_put32(dev_priv->gtt.gtt_mapping.acc_handle,
+ pd_addr, pd_entry);
+
+ }
+ (void) ddi_get32(dev_priv->gtt.gtt_mapping.acc_handle,
+ (gen6_gtt_pte_t *)(uintptr_t)((caddr_t)dev_priv->gtt.virtual_gtt + ppgtt->pd_offset));
+}
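+
+/*
+ * Each PDE written above is just the page-table pfn folded by
+ * GEN6_PDE_ADDR_ENCODE plus the valid bit; e.g. a hypothetical
+ * pt_pages[i] of 0x12345 gives pt_addr 0x12345000 and pd_entry
+ * 0x12345001. The trailing ddi_get32() is a posting read that forces
+ * the PDE writes out before the function returns.
+ */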
+
+static int gen6_ppgtt_enable(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t pd_offset;
+ struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ int i;
+
+ BUG_ON(ppgtt->pd_offset & 0x3f);
+
+ gen6_write_pdes(ppgtt);
+
+ pd_offset = ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines, */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+ ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ uint32_t ecochk, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ ecochk = I915_READ(GAM_ECOCHK);
+ if (IS_HASWELL(dev)) {
+ ecochk |= ECOCHK_PPGTT_WB_HSW;
+ } else {
+ ecochk |= ECOCHK_PPGTT_LLC_IVB;
+ ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+ }
+ I915_WRITE(GAM_ECOCHK, ecochk);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for_each_ring(ring, dev_priv, i) {
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+ }
+ return 0;
+}
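+
+/*
+ * PP_DIR_BASE packing used above, with a hypothetical pd_offset of
+ * 0x1f0000 bytes: 0x1f0000 / 64 = 0x7c00 cachelines, shifted left by
+ * 16 to land in bits 31:16 of the register, i.e. 0x7c000000.
+ */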
+
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+ unsigned first_entry,
+ unsigned num_entries)
+{
+ gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+ unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned last_pte, i;
+
+ scratch_pte = ppgtt->pte_encode(ppgtt->dev,
+ ppgtt->scratch_page_paddr,
+ I915_CACHE_LLC);
+
+ while (num_entries) {
+ last_pte = first_pte + num_entries;
+ if (last_pte > I915_PPGTT_PT_ENTRIES)
+ last_pte = I915_PPGTT_PT_ENTRIES;
+
+ pt_vaddr = (gen6_gtt_pte_t *)(uintptr_t)(ppgtt->kaddr + act_pt*PAGE_SIZE);
+
+ for (i = first_pte; i < last_pte; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pt++;
+ }
+}
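+
+/*
+ * Example of the paging math above, assuming I915_PPGTT_PT_ENTRIES is
+ * 1024 (one 4KB page of 4-byte PTEs): first_entry = 1500 and
+ * num_entries = 1000 starts at act_pt = 1, first_pte = 476, scrubs
+ * entries 476..1023 of that table (548 PTEs), then entries 0..451 of
+ * the next one.
+ */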
+
+static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+ unsigned first_entry, unsigned num_entries,
+ pfn_t *pages, enum i915_cache_level cache_level)
+{
+ gen6_gtt_pte_t *pt_vaddr;
+ unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned i, j;
+
+ pt_vaddr = (gen6_gtt_pte_t *)(uintptr_t)(ppgtt->kaddr + act_pt*PAGE_SIZE);
+ for (i = first_entry, j = 0; i < (first_entry + num_entries); i++, j++) {
+
+ pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, pages[j] << PAGE_SHIFT,
+ cache_level);
+ if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+ act_pt++;
+ pt_vaddr = (gen6_gtt_pte_t *)(uintptr_t)(ppgtt->kaddr + act_pt*PAGE_SIZE);
+ act_pte = 0;
+
+ }
+ }
+}
+
+static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+ kmem_free(ppgtt->pt_pages, btopr(ppgtt->real_size) * sizeof (pfn_t));
+
+ (void) ddi_dma_unbind_handle(ppgtt->dma_hdl);
+ ddi_dma_mem_free(&ppgtt->acc_hdl);
+ ddi_dma_free_handle(&ppgtt->dma_hdl);
+ kfree(ppgtt, sizeof(*ppgtt));
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_device *dev = ppgtt->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned first_pd_entry_in_global_pt;
+ int ret = -ENOMEM;
+
+ /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+ * entries. For aliasing ppgtt support we just steal them at the end for
+ * now. */
+ first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
+
+ if (IS_HASWELL(dev)) {
+ ppgtt->pte_encode = hsw_pte_encode;
+ } else if (IS_VALLEYVIEW(dev)) {
+ ppgtt->pte_encode = byt_pte_encode;
+ } else {
+ ppgtt->pte_encode = gen6_pte_encode;
+ }
+ ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->enable = gen6_ppgtt_enable;
+ ppgtt->clear_range = gen6_ppgtt_clear_range;
+ ppgtt->insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->cleanup = gen6_ppgtt_cleanup;
+ ret = i915_ppgtt_page_alloc(dev, ppgtt, ppgtt->num_pd_entries);
+ if (ret)
+ return (-ENOMEM);
+
+ ppgtt->clear_range(ppgtt, 0,
+ ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+
+ ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
+
+ return 0;
+
+}
+
+static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt;
+ int ret = -EINVAL;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return -ENOMEM;
+
+ ppgtt->dev = dev;
+ ppgtt->scratch_page_paddr = ptob(dev_priv->gtt.scratch_page->pfnarray[0]);
+
+ if (INTEL_INFO(dev)->gen < 8)
+ ret = gen6_ppgtt_init(ppgtt);
+ else {
+ DRM_ERROR("ppgtt is not supported");
+ BUG();
+ }
+
+ if (ret)
+ kfree(ppgtt, sizeof(*ppgtt));
+ else
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+ return ret;
+}
+
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ if (!ppgtt)
+ return;
+
+ ppgtt->cleanup(ppgtt);
+ dev_priv->mm.aliasing_ppgtt = NULL;
+}
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+
+ ppgtt->insert_entries(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->base.pfnarray,
+ cache_level);
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj)
+{
+ ppgtt->clear_range(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+}
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ /* First fill our portion of the GTT with scratch pages */
+/*
+ i915_ggtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+ (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+*/
+
+ list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.bound_list, global_list) {
+ i915_gem_clflush_object(obj);
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+ }
+
+ i915_gem_chipset_flush(dev);
+}
+
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+{
+#if 0
+ if (obj->has_dma_mapping)
+ return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+#endif
+ return 0;
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct drm_i915_gem_object *obj,
+ enum i915_cache_level level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned first_entry = obj->gtt_offset >> PAGE_SHIFT;
+ unsigned num_entries = obj->base.size / PAGE_SIZE;
+ /* LINTED */
+ const int max_entries = gtt_total_entries(dev_priv->gtt);
+ uint64_t page_addr;
+ gtt_pte_t *gtt_addr;
+ int i, j;
+
+ for (i = first_entry, j = 0; i < (first_entry + num_entries); i++, j++) {
+ gtt_addr = (gtt_pte_t *)(uintptr_t)((caddr_t)dev_priv->gtt.virtual_gtt + i * sizeof(gtt_pte_t));
+ page_addr = obj->base.pfnarray[j] << PAGE_SHIFT;
+ ddi_put32(dev_priv->gtt.gtt_mapping.acc_handle,
+ gtt_addr,
+ dev_priv->gtt.pte_encode(dev, page_addr, level));
+ }
+
+ BUG_ON(i > max_entries);
+ BUG_ON(j != obj->base.size / PAGE_SIZE);
+
+ /* XXX: This serves as a posting read to make sure that the PTE has
+ * actually been updated. There is some concern that even though
+ * registers and PTEs are within the same BAR that they are potentially
+ * of NUMA access patterns. Therefore, even with the way we assume
+ * hardware should work, we must keep this posting read for paranoia.
+ */
+ if (i != 0) {
+ gtt_addr = (gtt_pte_t *)(uintptr_t)((caddr_t)dev_priv->gtt.virtual_gtt + (i-1)*sizeof(gtt_pte_t));
+ page_addr = obj->base.pfnarray[j-1] << PAGE_SHIFT;
+ WARN_ON(ddi_get32(dev_priv->gtt.gtt_mapping.acc_handle, gtt_addr) !=
+ dev_priv->gtt.pte_encode(dev, page_addr, level));
+ }
+
+ /* This next bit makes the above posting read even more important. We
+ * want to flush the TLBs only after we're certain all the PTE updates
+ * have finished.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static void gen6_ggtt_clear_range(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ uint32_t type)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ gtt_pte_t scratch_pte;
+ unsigned first_entry = obj->gtt_offset >> PAGE_SHIFT;
+ unsigned num_entries = obj->base.size / PAGE_SIZE;
+ const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ uint64_t scratch_page_addr = dev_priv->gtt.scratch_page->pfnarray[0] << PAGE_SHIFT;
+ gtt_pte_t *gtt_addr;
+ int i;
+
+ if (num_entries > max_entries) {
+ DRM_ERROR("First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries);
+ num_entries = max_entries;
+ }
+
+ for (i = first_entry; i < (first_entry + num_entries); i++) {
+ gtt_addr = (gtt_pte_t *)(uintptr_t)((caddr_t)dev_priv->gtt.virtual_gtt + i * sizeof(gtt_pte_t));
+ scratch_pte = dev_priv->gtt.pte_encode(dev, scratch_page_addr, I915_CACHE_LLC);
+ ddi_put32(dev_priv->gtt.gtt_mapping.acc_handle,
+ gtt_addr, scratch_pte);
+ }
+}
+
+void i915_ggtt_insert_entries(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ (void) drm_agp_bind_pages(dev,
+ obj->base.pfnarray,
+ obj->base.size >> PAGE_SHIFT,
+ obj->gtt_offset,
+ flags);
+}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ uint32_t type)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!obj->agp_mem) {
+ (void) drm_agp_unbind_pages(dev, obj->base.pfnarray,
+ (obj->base.size / PAGE_SIZE), obj->gtt_offset,
+ dev_priv->gtt.scratch_page->pfnarray[0], type);
+ obj->agp_mem = -1;
+ }
+}
+
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.gtt_insert_entries(obj, cache_level);
+ obj->has_global_gtt_mapping = 1;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj, uint32_t type)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.gtt_clear_range(dev, obj, type);
+
+ obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
+}
+
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+ unsigned long color,
+ unsigned long *start,
+ unsigned long *end)
+{
+ if (node->color != color)
+ *start += 4096;
+
+ if (!list_empty(&node->node_list)) {
+ node = list_entry(node->node_list.next,
+ struct drm_mm_node,
+ node_list);
+ if (node->allocated && node->color != color)
+ *end -= 4096;
+ }
+}
+
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
+{
+ /* Let GEM manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch page.
+ * There are a number of places where the hardware apparently prefetches
+ * past the end of the object, and we've seen multiple hangs with the
+ * GPU head pointer stuck in a batchbuffer bound at the last page of the
+ * aperture. One page should be enough to keep any prefetching inside
+ * of the aperture.
+ */
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ /* Subtract the guard page ... */
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+ if (!HAS_LLC(dev))
+ dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+
+ /* Mark any preallocated objects as occupied */
+ list_for_each_entry(obj, struct drm_i915_gem_object,
+ &dev_priv->mm.bound_list, global_list) {
+ DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+ obj->gtt_offset, obj->base.size);
+
+ BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+ obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ obj->gtt_offset,
+ obj->base.size,
+ false);
+ obj->has_global_gtt_mapping = 1;
+ }
+
+ dev_priv->gtt.start = start;
+ dev_priv->gtt.total = end - start;
+}
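+
+/*
+ * Numeric example of the guard page above (hypothetical layout): with
+ * start = 0 and end = 2GB, drm_mm manages [0, 2GB - 4KB) while
+ * dev_priv->gtt.total still reports the full 2GB, so the last page
+ * stays bound to the scratch page.
+ */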
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+ /* Disable ppgtt on all SNB (gen6) parts; this port does not check VT-d. */
+ if (INTEL_INFO(dev)->gen == 6)
+ return false;
+
+ return true;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+
+ gtt_size = dev_priv->gtt.total;
+ mappable_size = dev_priv->gtt.mappable_end;
+
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+
+ if (INTEL_INFO(dev)->gen <= 7) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ }
+ }
+ i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+ setup_scratch_page(dev);
+
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ int ret;
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (!ret)
+ return;
+ DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
+ drm_mm_takedown(&dev_priv->mm.gtt_space);
+ gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ }
+
+}
+
+int setup_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int gen;
+
+ /* setup scratch page */
+ dev_priv->gtt.scratch_page = kzalloc(sizeof(struct drm_gem_object), GFP_KERNEL);
+ if (dev_priv->gtt.scratch_page == NULL)
+ return (-ENOMEM);
+
+ if (IS_G33(dev))
+ gen = 33;
+ else
+ gen = INTEL_INFO(dev)->gen * 10;
+
+ if (drm_gem_object_init(dev, dev_priv->gtt.scratch_page, DRM_PAGE_SIZE, gen) != 0) {
+ kmem_free(dev_priv->gtt.scratch_page, sizeof (struct drm_gem_object));
+ return (-ENOMEM);
+ }
+ (void) memset(dev_priv->gtt.scratch_page->kaddr, 0, DRM_PAGE_SIZE);
+
+ return 0;
+}
+
+void teardown_scratch_page(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_gem_object_release(dev_priv->gtt.scratch_page);
+ kmem_free(dev_priv->gtt.scratch_page, sizeof (struct drm_gem_object));
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
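+
+/*
+ * The GGMS field decoded above is in MB of PTE space: a raw field
+ * value of 2 yields 2 << 20 = 2MB of PTEs, i.e. 512K 4-byte entries
+ * mapping 2GB of GTT address space.
+ */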
+
+static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+ return snb_gmch_ctl << 25; /* 32 MB units */
+}
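+
+/*
+ * Likewise GMS is in 32MB units: a raw field value of 3 yields
+ * 3 << 25 = 96MB of stolen memory.
+ */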
+
+static ddi_device_acc_attr_t gtt_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC,
+};
+
+#define GEN6_GTTMMADR 1
+#define GEN6_GTT_OFFSET 0x200000
+
+static void i915_gen6_gtt_ioremap(struct drm_local_map *map, struct drm_device *dev)
+{
+ caddr_t base;
+ int ret;
+
+ ret = ddi_regs_map_setup(dev->devinfo, GEN6_GTTMMADR,
+ (caddr_t *)&base, 0, 0,
+ &gtt_attr, &map->acc_handle);
+
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("failed to map GTT");
+ }
+
+ map->handle = (void *)(base + GEN6_GTT_OFFSET);
+}
+
+static int gen6_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int gtt_size;
+ u16 snb_gmch_ctl;
+
+ dev_priv->gtt.mappable_end = pci_resource_len(dev->pdev, 1);
+
+ /* 64/512MB is the current min/max we actually know of, but this is just
+ * a coarse sanity check.
+ */
+ if ((dev_priv->gtt.mappable_end < (64<<20) ||
+ (dev_priv->gtt.mappable_end > (512<<20)))) {
+ DRM_ERROR("Unknown GMADR size (%lx)\n",
+ dev_priv->gtt.mappable_end);
+ return -ENXIO;
+ }
+
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
+
+ *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+
+ *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
+
+ /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+ dev_priv->gtt.gtt_mapping.offset = dev->agp_aperbase;
+ dev_priv->gtt.gtt_mapping.size = gtt_size;
+ dev_priv->gtt.gtt_mapping.type = 0;
+ dev_priv->gtt.gtt_mapping.flags = 0;
+ dev_priv->gtt.gtt_mapping.mtrr = 0;
+
+ i915_gen6_gtt_ioremap(&dev_priv->gtt.gtt_mapping, dev);
+
+ if (dev_priv->gtt.gtt_mapping.handle == NULL) {
+ DRM_ERROR("Failed to map the gtt page table");
+ return -ENOMEM;
+ }
+ dev_priv->gtt.virtual_gtt = (caddr_t)dev_priv->gtt.gtt_mapping.handle;
+
+ dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+
+ return 0;
+}
+
+static void gen6_gmch_remove(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_core_ioremapfree(&dev_priv->gtt.gtt_mapping, dev);
+}
+
+static unsigned int
+intel_gtt_stolen_size(struct drm_device *dev)
+{
+ u16 gmch_ctrl;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+ unsigned int stolen_size = 0;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen == 1)
+ return 0; /* no stolen mem on i81x */
+
+ if (i915_bridge_dev_read_config_word(&dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl))
+ return 0;
+
+ if (dev->pdev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ dev->pdev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ stolen_size = KB(512);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ stolen_size = MB(1);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ stolen_size = MB(8);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = I915_READ8(I830_RDRAM_CHANNEL_TYPE);
+ stolen_size = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+ stolen_size = 0;
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & INTEL_GMCH_GMS_MASK) {
+ case INTEL_855_GMCH_GMS_STOLEN_1M:
+ stolen_size = MB(1);
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_4M:
+ stolen_size = MB(4);
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_8M:
+ stolen_size = MB(8);
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_16M:
+ stolen_size = MB(16);
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_32M:
+ stolen_size = MB(32);
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_48M:
+ stolen_size = MB(48);
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_64M:
+ stolen_size = MB(64);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_128M:
+ stolen_size = MB(128);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_256M:
+ stolen_size = MB(256);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ stolen_size = MB(96);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ stolen_size = MB(160);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ stolen_size = MB(224);
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ stolen_size = MB(352);
+ break;
+ default:
+ stolen_size = 0;
+ break;
+ }
+ }
+
+ if (stolen_size > 0) {
+ DRM_INFO("detected %dK %s memory\n",
+ stolen_size / KB(1), local ? "local" : "stolen");
+ } else {
+ DRM_INFO("no pre-allocated video memory detected\n");
+ stolen_size = 0;
+ }
+
+ return stolen_size;
+}
+
+static unsigned int
+intel_gtt_mappable_entries(struct drm_device *dev)
+{
+ unsigned int aperture_size;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen == 1) {
+ u32 smram_miscc;
+
+ pci_read_config_dword(dev->pdev,
+ I810_SMRAM_MISCC, &smram_miscc);
+
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+ == I810_GFX_MEM_WIN_32M)
+ aperture_size = MB(32);
+ else
+ aperture_size = MB(64);
+ } else if (INTEL_INFO(dev)->gen == 2) {
+ u16 gmch_ctrl;
+
+ if (i915_bridge_dev_read_config_word(&dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl))
+ return 0;
+
+ if ((gmch_ctrl & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
+ aperture_size = MB(64);
+ else
+ aperture_size = MB(128);
+ } else {
+ /* 9xx supports large sizes, just look at the length */
+ aperture_size = pci_resource_len(dev->pdev,
+ (IS_G33(dev) || IS_I945GM(dev)) ? 2 : 1); /* OSOL_i915 */
+ DRM_DEBUG("aperture_size=%d", aperture_size);
+ }
+
+ return aperture_size >> PAGE_SHIFT;
+}
+
+static void
+i965_adjust_pgetbl_size(struct drm_device *dev, unsigned int size_flag)
+{
+ u32 pgetbl_ctl, pgetbl_ctl2;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* ensure that ppgtt is disabled */
+ pgetbl_ctl2 = I915_READ(I965_PGETBL_CTL2);
+ pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+ I915_WRITE(I965_PGETBL_CTL2, pgetbl_ctl2);
+
+ /* write the new ggtt size */
+ pgetbl_ctl = I915_READ(I810_PGETBL_CTL);
+ pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+ pgetbl_ctl |= size_flag;
+ I915_WRITE(I810_PGETBL_CTL, pgetbl_ctl);
+}
+
+static unsigned int
+i965_gtt_total_entries(struct drm_device *dev)
+{
+ int size;
+ u32 pgetbl_ctl;
+ u16 gmch_ctl;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (i915_bridge_dev_read_config_word(&dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctl))
+ return 0;
+
+ if (INTEL_INFO(dev)->gen == 5) {
+ switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+ case G4x_GMCH_SIZE_1M:
+ case G4x_GMCH_SIZE_VT_1M:
+ i965_adjust_pgetbl_size(dev, I965_PGETBL_SIZE_1MB);
+ break;
+ case G4x_GMCH_SIZE_VT_1_5M:
+ i965_adjust_pgetbl_size(dev, I965_PGETBL_SIZE_1_5MB);
+ break;
+ case G4x_GMCH_SIZE_2M:
+ case G4x_GMCH_SIZE_VT_2M:
+ i965_adjust_pgetbl_size(dev, I965_PGETBL_SIZE_2MB);
+ break;
+ }
+ }
+
+ pgetbl_ctl = I915_READ(I810_PGETBL_CTL);
+
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = KB(128);
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = KB(256);
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = KB(512);
+ break;
+ /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+ case I965_PGETBL_SIZE_1MB:
+ size = KB(1024);
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = KB(2048);
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = KB(1024 + 512);
+ break;
+ default:
+ DRM_DEBUG("unknown page table size, assuming 512KB");
+ size = KB(512);
+ }
+
+ return size/4;
+}
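+
+/*
+ * The size/4 above converts bytes of page table into 4-byte entries;
+ * e.g. a 512KB table holds 131072 PTEs, enough to map 512MB of GTT
+ * space at 4KB per page.
+ */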
+
+static unsigned int
+intel_gtt_total_entries(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_G33(dev) || INTEL_INFO(dev)->gen == 4 || INTEL_INFO(dev)->gen == 5)
+ return i965_gtt_total_entries(dev);
+ else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ return (dev_priv->gtt.mappable_end >> PAGE_SHIFT);
+ }
+}
+
+static int i915_gmch_probe(struct drm_device *dev,
+ size_t *gtt_total,
+ size_t *stolen)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->gtt.mappable_end = intel_gtt_mappable_entries(dev) << PAGE_SHIFT;
+ *gtt_total = intel_gtt_total_entries(dev) << PAGE_SHIFT;
+ *stolen = intel_gtt_stolen_size(dev);
+
+ dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+
+ return 0;
+}
+
+static void i915_gmch_remove(struct drm_device *dev)
+{
+
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ int ret;
+
+ gtt->mappable_base = dev->agp_aperbase;
+
+ if (INTEL_INFO(dev)->gen <= 5) {
+ dev_priv->gtt.gtt_probe = i915_gmch_probe;
+ dev_priv->gtt.gtt_remove = i915_gmch_remove;
+ } else {
+ dev_priv->gtt.gtt_probe = gen6_gmch_probe;
+ dev_priv->gtt.gtt_remove = gen6_gmch_remove;
+ if (IS_HASWELL(dev)) {
+ dev_priv->gtt.pte_encode = hsw_pte_encode;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->gtt.pte_encode = byt_pte_encode;
+ } else {
+ dev_priv->gtt.pte_encode = gen6_pte_encode;
+ }
+ }
+
+ ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
+ &dev_priv->gtt.stolen_size);
+ if (ret)
+ return ret;
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+ DRM_INFO("Memory usable by graphics device = %dM\n",
+ dev_priv->gtt.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
+ dev_priv->gtt.mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %dM\n",
+ dev_priv->gtt.stolen_size >> 20);
+
+ return 0;
+}
+
+void
+intel_rw_gtt(struct drm_device *dev,
+ size_t size,
+ uint32_t gtt_offset,
+ void *gttp,
+ uint32_t type)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned first_entry = gtt_offset >> PAGE_SHIFT;
+ unsigned num_entries = size / PAGE_SIZE;
+ uint32_t *gtt_addr;
+ uint32_t *old_gtt = (uint32_t *)gttp;
+ int i, j;
+
+ if (INTEL_INFO(dev)->gen <= 5) {
+ (void) drm_agp_rw_gtt(dev, size/PAGE_SIZE, gtt_offset,
+ gttp, type);
+ } else {
+ if (type) {
+ for (i = first_entry, j = 0; i < (first_entry + num_entries); i++, j++) {
+ gtt_addr = (uint32_t *)(uintptr_t)((caddr_t)dev_priv->gtt.virtual_gtt
+ + i * sizeof(uint32_t));
+ ddi_put32(dev_priv->gtt.gtt_mapping.acc_handle,
+ gtt_addr, old_gtt[j]);
+ }
+ } else {
+ for (i = first_entry; i < (first_entry + num_entries); i++) {
+ gtt_addr = (uint32_t *)(uintptr_t)((caddr_t)dev_priv->gtt.virtual_gtt
+ + i * sizeof(uint32_t));
+ old_gtt[i] = ddi_get32(dev_priv->gtt.gtt_mapping.acc_handle, gtt_addr);
+ }
+ }
+ }
+}
+
+void
+i915_clean_gtt(struct drm_device *dev, size_t offset)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned first_entry = offset >> PAGE_SHIFT;
+ unsigned num_entries = (dev_priv->gtt.total - offset - PAGE_SIZE) >> PAGE_SHIFT;
+ uint64_t scratch_page_addr = dev_priv->gtt.scratch_page->pfnarray[0] << PAGE_SHIFT;
+ gtt_pte_t *gtt_addr, scratch_pte;
+ int i;
+
+ if (INTEL_INFO(dev)->gen <= 5) {
+ (void) drm_agp_unbind_pages(dev, NULL, num_entries,
+ offset, dev_priv->gtt.scratch_page->pfnarray[0], 1);
+ } else {
+ for (i = first_entry; i < (first_entry + num_entries); i++) {
+ gtt_addr = (gtt_pte_t *)(uintptr_t)((caddr_t)dev_priv->gtt.virtual_gtt
+ + i * sizeof(gtt_pte_t));
+ scratch_pte = gen6_pte_encode(dev, scratch_page_addr, I915_CACHE_LLC);
+ ddi_put32(dev_priv->gtt.gtt_mapping.acc_handle,
+ gtt_addr, scratch_pte);
+ }
+ }
+}
+
diff --git a/usr/src/uts/intel/io/i915/i915_gem_stolen.c b/usr/src/uts/intel/io/i915/i915_gem_stolen.c
new file mode 100644
index 0000000..41c1b57
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_gem_stolen.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright © 2008-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS, so users find that their systems have less memory
+ * available than they installed. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is to try to reuse that object for our own fbcon, which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ u32 base;
+
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so on those compute the base by subtracting the
+ * stolen memory from the Top of Low Usable DRAM which is where the
+ * BIOS places the graphics stolen memory.
+ *
+ * On gen2, the layout is slightly different with the Graphics Segment
+ * immediately following Top of Memory (or Top of Usable DRAM). Note
+ * it appears that TOUD is only reported by 865g, so we just use the
+ * top of memory as determined by the e820 probe.
+ *
+ * XXX gen2 requires an unavailable symbol and 945gm fails with
+ * its value of TOLUD.
+ */
+ base = 0;
+ if (IS_VALLEYVIEW(dev)) {
+ pci_read_config_dword(dev->pdev, 0x5c, &base);
+ base &= ~((1<<20) - 1);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ /* Read Base Data of Stolen Memory Register (BDSM) directly.
+ * Note that there is also a MCHBAR mirror at 0x1080c0 or
+ * we could use device 2:0x5c instead.
+ */
+ pci_read_config_dword(pdev, 0xB0, &base);
+ base &= ~4095; /* lower bits used for locking register */
+ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* Read Graphics Base of Stolen Memory directly */
+ pci_read_config_dword(pdev, 0xA4, &base);
+#if 0
+ } else if (IS_GEN3(dev)) {
+ u8 val;
+ /* Stolen is immediately below Top of Low Usable DRAM */
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+ base -= dev_priv->mm.gtt->stolen_size;
+ } else {
+ /* Stolen is immediately above Top of Memory */
+ base = max_low_pfn_mapped << PAGE_SHIFT;
+#endif
+ }
+
+ return base;
+}
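+
+/*
+ * Example of the gen6+ BDSM decode above (hypothetical raw value): a
+ * register reading of 0x7b000001 has the lock bit set in the low bits,
+ * and base &= ~4095 strips them to leave a stolen base of 0x7b000000.
+ */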
+
+static int i915_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *compressed_llb;
+
+ /* Try to over-allocate to reduce reallocations and fragmentation */
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+ size <<= 1, 4096, 0);
+ if (!compressed_fb)
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+ size >>= 1, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
+
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
+
+ dev_priv->compressed_llb = compressed_llb;
+
+ I915_WRITE(FBC_CFB_BASE,
+ dev_priv->mm.stolen_base + compressed_fb->start);
+ I915_WRITE(FBC_LL_BASE,
+ dev_priv->mm.stolen_base + compressed_llb->start);
+ }
+
+ dev_priv->compressed_fb = compressed_fb;
+ dev_priv->cfb_size = size;
+
+ DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+ size);
+
+ return 0;
+
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ return -ENOSPC;
+}
+
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return -ENODEV;
+
+ if (size < dev_priv->cfb_size)
+ return 0;
+
+ /* Release any current block */
+ i915_gem_stolen_cleanup_compression(dev);
+
+ return i915_setup_compression(dev, size);
+}
+
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->cfb_size == 0)
+ return;
+
+ if (dev_priv->compressed_fb)
+ drm_mm_put_block(dev_priv->compressed_fb);
+
+ if (dev_priv->compressed_llb)
+ drm_mm_put_block(dev_priv->compressed_llb);
+
+ dev_priv->cfb_size = 0;
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return;
+
+ i915_gem_stolen_cleanup_compression(dev);
+ drm_mm_takedown(&dev_priv->mm.stolen);
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int bios_reserved = 0;
+
+ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+ if (dev_priv->mm.stolen_base == 0)
+ return 0;
+
+ DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
+ dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
+
+ if (IS_VALLEYVIEW(dev))
+ bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
+ bios_reserved);
+
+ return 0;
+}
+
+#if 0
+static struct sg_table *
+i915_pages_create_for_stolen(struct drm_device *dev,
+ u32 offset, u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ caddr_t *page_list;
+
+ DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
+ BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+
+ /* We hide that we have no struct page backing our stolen object
+ * by wrapping the contiguous physical allocation with a fake
+ * dma mapping in a single scatterlist.
+ */
+
+ page_list = kmem_zalloc(btop(size) * sizeof(caddr_t), KM_SLEEP);
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return NULL;
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return NULL;
+ }
+
+ sg = st->sgl;
+ sg->offset = offset;
+ sg->length = size;
+
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+ sg_dma_len(sg) = size;
+
+ return st;
+}
+#endif
+
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ BUG();
+ return -EINVAL;
+}
+
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+{
+ /* Should only be called during free */
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+ .get_pages = i915_gem_object_get_pages_stolen,
+ .put_pages = i915_gem_object_put_pages_stolen,
+};
+
+
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct drm_device *dev,
+ struct drm_mm_node *stolen)
+{
+#if 0
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL)
+ return NULL;
+
+ if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
+ goto cleanup;
+
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+
+ obj->page_list = i915_pages_create_for_stolen(dev,
+ stolen->start, stolen->size);
+ if (obj->pages == NULL)
+ goto cleanup;
+
+ obj->has_dma_mapping = true;
+ i915_gem_object_pin_pages(obj);
+ obj->stolen = stolen;
+
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+
+cleanup:
+ i915_gem_object_free(obj);
+#endif
+ return NULL;
+}
+
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+{
+ DRM_DEBUG_KMS("Not support stolen object yet");
+ return NULL;
+#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node *stolen;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return NULL;
+
+ DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
+ if (size == 0)
+ return NULL;
+
+ stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (stolen)
+ stolen = drm_mm_get_block(stolen, size, 4096);
+ if (stolen == NULL)
+ return NULL;
+
+ obj = _i915_gem_object_create_stolen(dev, stolen);
+ if (obj)
+ return obj;
+
+ drm_mm_put_block(stolen);
+ return NULL;
+#endif
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+ u32 stolen_offset,
+ u32 gtt_offset,
+ u32 size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node *stolen;
+
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ return NULL;
+
+ DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
+ stolen_offset, gtt_offset, size);
+
+ /* KISS and expect everything to be page-aligned */
+ BUG_ON(stolen_offset & 4095);
+ BUG_ON(size & 4095);
+
+ if (size == 0)
+ return NULL;
+
+ stolen = drm_mm_create_block(&dev_priv->mm.stolen,
+ stolen_offset, size,
+ false);
+ if (stolen == NULL) {
+ DRM_DEBUG_KMS("failed to allocate stolen space\n");
+ return NULL;
+ }
+
+ obj = _i915_gem_object_create_stolen(dev, stolen);
+ if (obj == NULL) {
+ DRM_DEBUG_KMS("failed to allocate stolen object\n");
+ drm_mm_put_block(stolen);
+ return NULL;
+ }
+
+ /* Some objects just need physical mem from stolen space */
+ if (gtt_offset == 0xffffffff)
+ return obj;
+
+ /* To simplify the initialisation sequence between KMS and GTT,
+ * we allow construction of the stolen object prior to
+ * setting up the GTT space. The actual reservation will occur
+ * later.
+ */
+ if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
+ obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ gtt_offset, size,
+ false);
+ if (obj->gtt_space == NULL) {
+ DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+ drm_gem_object_unreference(&obj->base);
+ return NULL;
+ }
+ } else
+ obj->gtt_space = I915_GTT_RESERVED;
+
+ obj->gtt_offset = gtt_offset;
+ obj->has_global_gtt_mapping = 1;
+
+ list_add_tail(&obj->global_list, &dev_priv->mm.bound_list, (caddr_t)obj);
+ list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
+
+ return obj;
+}
+
+void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_put_block(obj->stolen);
+ obj->stolen = NULL;
+ }
+}
diff --git a/usr/src/uts/intel/io/i915/i915_gem_tiling.c b/usr/src/uts/intel/io/i915/i915_gem_tiling.c
new file mode 100644
index 0000000..d70d021
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_gem_tiling.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvement from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
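+
+/*
+ * Concrete example of 9/10 swizzling (illustrative offset): for
+ * I915_BIT_6_SWIZZLE_9_10, bit 6 of an address is XORed with bits 9
+ * and 10, so byte offset 0x240 (bit6 = 1, bit9 = 1, bit10 = 0) is
+ * fetched from 0x200 instead: new bit6 = 1 ^ 1 ^ 0 = 0.
+ */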
+
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+ if (IS_VALLEYVIEW(dev)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated with
+ * identically sized dimms. We don't need to check the 3rd
+ * channel because no cpu with gpu attached ships in that
+ * configuration. Also, swizzling only makes sense for 2
+ * channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+ } else if (IS_GEN5(dev)) {
+ /* On Ironlake, whatever the DRAM config, the GPU always does
+ * the same swizzling setup.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_GEN2(dev)) {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ uint32_t dcc;
+
+ /* On mobile 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+ * 9 for Y tiled. The CPU's interleave is independent, and
+ * can be based on either bit 11 (haven't seen this yet) or
+ * bit 17 (common).
+ */
+ dcc = I915_READ(DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+ /* This is the base swizzling by the GPU for
+ * tiled buffers.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* Workaround for the 965GM flickering issue seen in OpenArena. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 swizzling by the CPU in addition. */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+ }
+ break;
+ }
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ } else {
+ /* The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ *    slot fill         memory size  swizzling
+ * 0A   0B   1A   1B   1-ch   2-ch
+ * 512  0    0    0    512    0     O
+ * 512  0    512  0    16     1008  X
+ * 512  0    0    512  16     1008  X
+ * 0    512  0    512  16     1008  X
+ * 1024 1024 1024 0    2048   1024  O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ */
+ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ }
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/* Check pitch constraints for all chips & tiling formats */
+static bool
+i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+{
+ int tile_width;
+
+ /* Linear is always fine */
+ if (tiling_mode == I915_TILING_NONE)
+ return true;
+
+ if (IS_GEN2(dev) ||
+ (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* check maximum stride & object size */
+ /* i965+ stores the end address of the gtt mapping in the fence
+ * reg, so don't bother to check the size */
+ if (INTEL_INFO(dev)->gen >= 7) {
+ if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
+ return false;
+ } else if (INTEL_INFO(dev)->gen >= 4) {
+ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+ return false;
+ } else {
+ if (stride > 8192)
+ return false;
+
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
+ }
+
+ if (stride < tile_width)
+ return false;
+
+ /* 965+ just needs multiples of tile width */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (stride & (tile_width - 1))
+ return false;
+ return true;
+ }
+
+ /* Pre-965 needs power of two tile widths */
+ if (stride & (stride - 1))
+ return false;
+
+ return true;
+}
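+
+/*
+ * Illustration of the stride rules above (hypothetical strides, X
+ * tiling, 512-byte tile width): stride 4608 (9 * 512) passes on gen4+
+ * because only a multiple of the tile width is required, but fails on
+ * pre-965 parts, which additionally demand a power-of-two stride.
+ */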
+
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+ u32 size;
+
+ if (tiling_mode == I915_TILING_NONE)
+ return true;
+
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
+ return true;
+
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
+ } else {
+ if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+ return false;
+ }
+
+ size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+ if (obj->gtt_space->size != size)
+ return false;
+
+ if (obj->gtt_offset & (size - 1))
+ return false;
+
+ return true;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+/* LINTED */
+i915_gem_set_tiling(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_set_tiling *args = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int ret = 0;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
+
+ if (!i915_tiling_ok(dev,
+ args->stride, obj->base.size, args->tiling_mode)) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return -EINVAL;
+ }
+
+ if (obj->pin_count) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return -EBUSY;
+ }
+
+ if (args->tiling_mode == I915_TILING_NONE) {
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ args->stride = 0;
+ } else {
+ if (args->tiling_mode == I915_TILING_X)
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ else
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+ /* Hide bit 17 swizzling from the user. This prevents old Mesa
+ * from aborting the application on sw fallbacks to bit 17,
+ * and we use the pread/pwrite bit17 paths to swizzle for it.
+ * If there was a user that was relying on the swizzle
+ * information for drm_intel_bo_map()ed reads/writes this would
+ * break it, but we don't have any of those.
+ */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
+ /* If we can't handle the swizzling, make it untiled. */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ args->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ args->stride = 0;
+ }
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
+ /* We need to rebind the object if its current allocation
+ * no longer meets the alignment restrictions for its new
+ * tiling mode. Otherwise we can just leave it alone, but
+ * need to ensure that any fence register is updated before
+ * the next fenced (either through the GTT or by the BLT unit
+ * on older GPUs) access.
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
+ */
+
+ obj->map_and_fenceable =
+ obj->gtt_space == NULL ||
+ (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
+
+ /* Rebind if we need a change of alignment */
+ if (!obj->map_and_fenceable) {
+ u32 unfenced_alignment =
+ i915_gem_get_gtt_alignment(dev, obj->base.size,
+ args->tiling_mode,
+ false);
+ if (obj->gtt_offset & (unfenced_alignment - 1))
+ ret = i915_gem_object_unbind(obj, 1);
+ }
+
+ if (ret == 0) {
+ obj->fence_dirty =
+ obj->fenced_gpu_access ||
+ obj->fence_reg != I915_FENCE_REG_NONE;
+
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
+ }
+ }
+ /* we have to maintain this existing ABI... */
+ args->stride = obj->stride;
+ args->tiling_mode = obj->tiling_mode;
+
+ /* Try to preallocate memory required to save swizzling on put-pages */
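+ /* One bit per page records bit 17 of that page's physical
+  * address; see i915_gem_object_save_bit_17_swizzle().
+  */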
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+ sizeof(long), GFP_KERNEL);
+ }
+ } else {
+ if (obj->bit_17 != NULL) {
+ kfree(obj->bit_17, BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+ sizeof(long));
+ obj->bit_17 = NULL;
+ }
+ }
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+/* LINTED */
+i915_gem_get_tiling(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_gem_get_tiling *args = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
+
+ mutex_lock(&dev->struct_mutex);
+
+ args->tiling_mode = obj->tiling_mode;
+ switch (obj->tiling_mode) {
+ case I915_TILING_X:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ break;
+ case I915_TILING_NONE:
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ default:
+ DRM_ERROR("unknown tiling mode\n");
+ }
+
+ /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(caddr_t va)
+{
+ char temp[64];
+ char *vaddr;
+ int i;
+
+ vaddr = (char *)(uintptr_t) va;
+
+ for (i = 0; i < PAGE_SIZE; i += 128) {
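+ /* Exchange the two 64-byte halves of this 128-byte block
+  * through a bounce buffer.
+  */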
+ (void) memcpy(temp, &vaddr[i], 64);
+ (void) memcpy(&vaddr[i], &vaddr[i + 64], 64);
+ (void) memcpy(&vaddr[i + 64], temp, 64);
+ }
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ int page_count = obj->base.size >> PAGE_SHIFT;
+ int i;
+
+ if (obj->bit_17 == NULL)
+ return;
+
+ for (i = 0; i < page_count; i++) {
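+ /* Re-swizzle only those pages whose physical bit 17 differs
+  * from the value recorded when the pages were last saved.
+  */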
+ char new_bit_17 = (char)(page_to_phys(obj->page_list[i]) >> 17);
+ if ((new_bit_17 & 0x1) !=
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(obj->page_list[i]);
+ }
+ }
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+ int page_count = obj->base.size >> PAGE_SHIFT;
+ int i;
+
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+ sizeof(long), GFP_KERNEL);
+ if (obj->bit_17 == NULL) {
+ DRM_ERROR("Failed to allocate memory for bit 17 "
+ "record\n");
+ return;
+ }
+ }
+
+ for (i = 0; i < page_count; i++) {
+ if (page_to_phys(obj->page_list[i]) & (1 << 17))
+ set_bit(i, obj->bit_17);
+ else
+ clear_bit(i, obj->bit_17);
+ }
+}
diff --git a/usr/src/uts/intel/io/i915/i915_io32.c b/usr/src/uts/intel/io/i915/i915_io32.c
new file mode 100644
index 0000000..89e37c4
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_io32.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drm.h"
+#include "drmP.h"
+#include "i915_drm.h"
+
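+/*
+ * Copy-in helpers used when a 32-bit process issues these ioctls on a
+ * 64-bit kernel (_MULTI_DATAMODEL): each routine copies in the 32-bit
+ * layout of the argument structure and widens its pointer fields into
+ * the 64-bit kernel layout.
+ */
+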
+int
+copyin32_i915_batchbuffer(void * dest, void * src)
+{
+ struct drm_i915_batchbuffer *dest64 = dest;
+ struct drm_i915_batchbuffer32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof (dest32));
+
+ dest64->start = dest32.start;
+ dest64->used = dest32.used;
+ dest64->DR1 = dest32.DR1;
+ dest64->DR4 = dest32.DR4;
+ dest64->num_cliprects = dest32.num_cliprects;
+ dest64->cliprects = (drm_clip_rect_t __user *)(uintptr_t)dest32.cliprects;
+
+ return (0);
+}
+
+int
+copyin32_i915_irq_emit(void *dest, void *src)
+{
+ struct drm_i915_irq_emit *dest64 = dest;
+ struct drm_i915_irq_emit32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof (dest32));
+
+ dest64->irq_seq = (int __user *)(uintptr_t)dest32.irq_seq;
+
+ return (0);
+}
+
+int
+copyin32_i915_getparam(void *dest, void *src)
+{
+ struct drm_i915_getparam *dest64 = dest;
+ struct drm_i915_getparam32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof (dest32));
+
+ dest64->param = dest32.param;
+ dest64->value = (int __user *)(uintptr_t)dest32.value;
+
+ return (0);
+}
+
+int
+copyin32_i915_cmdbuffer(void * dest, void * src)
+{
+ struct _drm_i915_cmdbuffer *dest64 = dest;
+ struct drm_i915_cmdbuffer32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof (dest32));
+
+ dest64->buf = (char __user *)(uintptr_t)dest32.buf;
+ dest64->sz = dest32.sz;
+ dest64->DR1 = dest32.DR1;
+ dest64->DR4 = dest32.DR4;
+ dest64->num_cliprects = dest32.num_cliprects;
+ dest64->cliprects = (drm_clip_rect_t __user *)(uintptr_t)dest32.cliprects;
+
+ return (0);
+}
+
+int
+copyin32_i915_mem_alloc(void *dest, void *src)
+{
+ struct drm_i915_mem_alloc *dest64 = dest;
+ struct drm_i915_mem_alloc32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof (dest32));
+
+ dest64->region = dest32.region;
+ dest64->alignment = dest32.alignment;
+ dest64->size = dest32.size;
+ dest64->region_offset = (int *)(uintptr_t)dest32.region_offset;
+
+ return (0);
+}
diff --git a/usr/src/uts/intel/io/i915/i915_io32.h b/usr/src/uts/intel/io/i915/i915_io32.h
new file mode 100644
index 0000000..4f16dc1
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_io32.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _I915_IO32_H_
+#define _I915_IO32_H_
+
+#ifdef _MULTI_DATAMODEL
+
+extern int copyin32_i915_mem_alloc(void *dest, void *src);
+extern int copyin32_i915_irq_emit(void *dest, void *src);
+extern int copyin32_i915_getparam(void *dest, void *src);
+extern int copyin32_i915_cmdbuffer(void * dest, void * src);
+extern int copyin32_i915_batchbuffer(void * dest, void * src);
+
+#endif
+
+#endif /* _I915_IO32_H_ */
diff --git a/usr/src/uts/intel/io/i915/i915_irq.c b/usr/src/uts/intel/io/i915/i915_irq.c
new file mode 100644
index 0000000..518efe6
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_irq.c
@@ -0,0 +1,3657 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+static const u32 hpd_ibx[] = {
+ [HPD_CRT] = SDE_CRT_HOTPLUG,
+ [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
+ [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
+ [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG
+};
+
+static const u32 hpd_cpt[] = {
+ [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
+ [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
+ [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+ [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+};
+
+static const u32 hpd_mask_i915[] = {
+ [HPD_CRT] = CRT_HOTPLUG_INT_EN,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+};
+
+static const u32 hpd_status_gen4[] = {
+ [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
+ [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+ [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
+ [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
+ [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+ [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+/* For display hotplug interrupt */
+static void
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
+ }
+}
+
+static void
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
+ }
+}
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (crtc->pch_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+ DE_PIPEB_FIFO_UNDERRUN;
+
+ if (enable)
+ ironlake_enable_display_irq(dev_priv, bit);
+ else
+ ironlake_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (enable) {
+ if (!ivb_can_enable_err_int(dev))
+ return;
+
+ I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
+ ERR_INT_FIFO_UNDERRUN_B |
+ ERR_INT_FIFO_UNDERRUN_C);
+
+ ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ } else {
+ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ }
+}
+
+static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+ bool enable)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
+ SDE_TRANSB_FIFO_UNDER;
+
+ if (enable)
+ I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+ else
+ I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
+
+ POSTING_READ(SDEIMR);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (enable) {
+ if (!cpt_can_enable_serr_int(dev))
+ return;
+
+ I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
+ SERR_INT_TRANS_B_FIFO_UNDERRUN |
+ SERR_INT_TRANS_C_FIFO_UNDERRUN);
+
+ I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+ } else {
+ I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
+ }
+
+ POSTING_READ(SDEIMR);
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
+ * @dev: drm device
+ * @pipe: pipe
+ * @enable: true if we want to report FIFO underrun errors, false otherwise
+ *
+ * This function makes us disable or enable CPU fifo underruns for a specific
+ * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
+ * reporting for one pipe may also disable all the other CPU error interrupts
+ * for the other pipes, since there's just one interrupt mask/enable bit for
+ * all the pipes.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ ret = !intel_crtc->cpu_fifo_underrun_disabled;
+
+ if (enable == ret)
+ goto done;
+
+ intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+ if (IS_GEN5(dev) || IS_GEN6(dev))
+ ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ else if (IS_GEN7(dev))
+ ivybridge_set_fifo_underrun_reporting(dev, enable);
+
+done:
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ return ret;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
+ * @dev: drm device
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: true if we want to report FIFO underrun errors, false otherwise
+ *
+ * This function makes us disable or enable PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, since there's just one
+ * interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe p;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ unsigned long flags;
+ bool ret;
+
+ if (HAS_PCH_LPT(dev)) {
+ crtc = NULL;
+ for_each_pipe(p) {
+ struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
+ if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
+ crtc = c;
+ break;
+ }
+ }
+ if (!crtc) {
+ DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
+ return false;
+ }
+ } else {
+ crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ }
+ intel_crtc = to_intel_crtc(crtc);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ ret = !intel_crtc->pch_fifo_underrun_disabled;
+
+ if (enable == ret)
+ goto done;
+
+ intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+ if (HAS_PCH_IBX(dev))
+ ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+ else
+ cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
+
+done:
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ return ret;
+}
+
+
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0x7fff0000;
+
+ if ((pipestat & mask) == mask)
+ return;
+
+ /* Enable the interrupt, clear any pending status */
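+ /* Status bits sit 16 bits below their matching enable bits,
+  * so writing (mask >> 16) as well clears any pending status.
+  */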
+ pipestat |= mask | (mask >> 16);
+ I915_WRITE(reg, pipestat);
+ POSTING_READ(reg);
+}
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ u32 reg = PIPESTAT(pipe);
+ u32 pipestat = I915_READ(reg) & 0x7fff0000;
+
+ if ((pipestat & mask) == 0)
+ return;
+
+ pipestat &= ~mask;
+ I915_WRITE(reg, pipestat);
+ POSTING_READ(reg);
+}
+
+/**
+ * i915_pipe_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+static int
+i915_pipe_enabled(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Locking is horribly broken here, but whatever. */
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return intel_crtc->active;
+ } else {
+ return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+ }
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long high_frame;
+ unsigned long low_frame;
+ u32 high1, high2, low;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ high_frame = PIPEFRAME(pipe);
+ low_frame = PIPEFRAMEPIXEL(pipe);
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ do {
+ high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
+ high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ } while (high1 != high2);
+
+ high1 >>= PIPE_FRAME_HIGH_SHIFT;
+ low >>= PIPE_FRAME_LOW_SHIFT;
+ return (high1 << 8) | low;
+}
+
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int reg = PIPE_FRMCOUNT_GM45(pipe);
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ return I915_READ(reg);
+}
+
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vbl = 0, position = 0;
+ int vbl_start, vbl_end, htotal, vtotal;
+ bool in_vbl = true;
+ int ret = 0;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+ "pipe %c\n", pipe_name(pipe));
+ return 0;
+ }
+
+ /* Get vtotal. */
+ vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = I915_READ(PIPEDSL(pipe));
+
+ /* Decode into vertical scanout position. Don't have
+ * horizontal scanout position.
+ */
+ *vpos = position & 0x1fff;
+ *hpos = 0;
+ } else {
+ /* Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ /* Query vblank area. */
+ vbl = I915_READ(VBLANK(cpu_transcoder));
+
+ /* Test position against vblank region. */
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+
+ if ((*vpos < vbl_start) || (*vpos > vbl_end))
+ in_vbl = false;
+
+ /* Inside "upper part" of vblank area? Apply corrective offset: */
+ if (in_vbl && (*vpos >= vbl_start))
+ *vpos = *vpos - vtotal;
+
+ /* Readouts valid? */
+ if (vbl > 0)
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
+
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *crtc;
+
+ if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ /* Get drm_crtc to timestamp: */
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc == NULL) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
+ return -EINVAL;
+ }
+
+ if (!crtc->enabled) {
+ DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ return -EBUSY;
+ }
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+ vblank_time, flags,
+ crtc);
+}
+
+static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+{
+ enum drm_connector_status old_status;
+
+ WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ old_status = connector->status;
+
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old_status, connector->status);
+ return (old_status != connector->status);
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
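+/* i.e. two minutes before hotplug detection is re-enabled on a pin that
+ * was switched to polling after an interrupt storm.
+ */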
+
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ hotplug_work);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_connector *intel_connector;
+ struct intel_encoder *intel_encoder;
+ struct drm_connector *connector;
+ unsigned long irqflags;
+ bool hpd_disabled = false;
+ bool changed = false;
+ u32 hpd_event_bits;
+
+ /* HPD irq before everything is fully set up. */
+ if (!dev_priv->enable_hotplug_processing)
+ return;
+
+ mutex_lock(&mode_config->mutex);
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+ hpd_event_bits = dev_priv->hpd_event_bits;
+ dev_priv->hpd_event_bits = 0;
+ list_for_each_entry(connector, struct drm_connector, &mode_config->connector_list, head) {
+ intel_connector = to_intel_connector(connector);
+ intel_encoder = intel_connector->encoder;
+ if (intel_encoder->hpd_pin > HPD_NONE &&
+ dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
+ connector->polled == DRM_CONNECTOR_POLL_HPD) {
+ DRM_INFO("HPD interrupt storm detected on connector %s: "
+ "switching from hotplug detection to polling\n",
+ drm_get_connector_name(connector));
+ dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+ hpd_disabled = true;
+ }
+ if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+ drm_get_connector_name(connector), intel_encoder->hpd_pin);
+ }
+ }
+ /* If there were no outputs to poll, polling was disabled;
+  * make sure it is enabled now that HPD is being disabled on
+  * some connectors, since they must be polled instead. */
+ if (hpd_disabled) {
+ drm_kms_helper_poll_enable(dev);
+ mod_timer(&dev_priv->hotplug_reenable_timer,
+ msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+ }
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ list_for_each_entry(connector, struct drm_connector, &mode_config->connector_list, head) {
+ intel_connector = to_intel_connector(connector);
+ intel_encoder = intel_connector->encoder;
+ if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ if (intel_encoder->hot_plug)
+ intel_encoder->hot_plug(intel_encoder);
+ if (intel_hpd_irq_event(dev, connector))
+ changed = true;
+ }
+ }
+ mutex_unlock(&mode_config->mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_delay;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mchdev_lock, flags);
+
+ I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+ new_delay = dev_priv->ips.cur_delay;
+
+ I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = I915_READ(RCPREVBSYTUPAVG);
+ busy_down = I915_READ(RCPREVBSYTDNAVG);
+ max_avg = I915_READ(RCBMAXAVG);
+ min_avg = I915_READ(RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ if (busy_up > max_avg) {
+ if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.cur_delay - 1;
+ if (new_delay < dev_priv->ips.max_delay)
+ new_delay = dev_priv->ips.max_delay;
+ } else if (busy_down < min_avg) {
+ if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.cur_delay + 1;
+ if (new_delay > dev_priv->ips.min_delay)
+ new_delay = dev_priv->ips.min_delay;
+ }
+
+ if (ironlake_set_drps(dev, new_delay))
+ dev_priv->ips.cur_delay = new_delay;
+
+ spin_unlock_irqrestore(&mchdev_lock, flags);
+
+ return;
+}
+
+static void notify_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (ring->obj == NULL)
+ return;
+
+ DRM_WAKEUP(&ring->irq_queue);
+ if (i915_enable_hangcheck && !dev_priv->gpu_hang) {
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
+}
+
+static void gen6_pm_rps_work(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ rps.work);
+ u32 pm_iir, pm_imr;
+ u8 new_delay;
+
+ spin_lock_irq(&dev_priv->rps.lock);
+ pm_iir = dev_priv->rps.pm_iir;
+ dev_priv->rps.pm_iir = 0;
+ pm_imr = I915_READ(GEN6_PMIMR);
+ I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&dev_priv->rps.lock);
+
+ if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
+ return;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ new_delay = dev_priv->rps.cur_delay + 1;
+
+ /*
+ * For better performance, jump directly
+ * to RPe if we're below it.
+ */
+ if (IS_VALLEYVIEW(dev_priv->dev) &&
+ dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+ new_delay = dev_priv->rps.rpe_delay;
+ } else
+ new_delay = dev_priv->rps.cur_delay - 1;
+
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ if (new_delay >= dev_priv->rps.min_delay &&
+ new_delay <= dev_priv->rps.max_delay) {
+ if (IS_VALLEYVIEW(dev_priv->dev))
+ valleyview_set_rps(dev_priv->dev, new_delay);
+ else
+ gen6_set_rps(dev_priv->dev, new_delay);
+ }
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+/**
+ * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * occurred.
+ * @work: workqueue struct
+ *
+ * Doesn't actually do anything except notify userspace. As a consequence of
+ * this event, userspace should try to remap the bad rows, since
+ * statistically the same row is likely to go bad again.
+ */
+static void ivybridge_parity_work(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ l3_parity.error_work);
+ u32 error_status, row, bank, subbank;
+ uint32_t misccpctl;
+ unsigned long flags;
+
+ /* We must turn off DOP level clock gating to access the L3 registers.
+ * In order to prevent a get/put style interface, acquire struct mutex
+ * any time we access those registers.
+ */
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ error_status = I915_READ(GEN7_L3CDERRST1);
+ row = GEN7_PARITY_ERROR_ROW(error_status);
+ bank = GEN7_PARITY_ERROR_BANK(error_status);
+ subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+ I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
+ GEN7_L3CDERRST1_ENABLE);
+ POSTING_READ(GEN7_L3CDERRST1);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
+ row, bank, subbank);
+}
+
+static void ivybridge_handle_parity_error(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long flags;
+
+ if (!HAS_L3_GPU_CACHE(dev))
+ return;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
+}
+
+static void snb_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+
+ if (gt_iir &
+ (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
+
+ if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+ GT_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
+ DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
+ i915_handle_error(dev, false);
+ }
+
+ if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+ ivybridge_handle_parity_error(dev);
+}
+
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+ u32 pm_iir)
+{
+ unsigned long flags;
+
+ /*
+ * IIR bits should never already be set because IMR should
+ * prevent an interrupt from being shown in IIR. The warning
+ * displays a case where we've unsafely cleared
+ * dev_priv->rps.pm_iir. Although missing an interrupt of the same
+ * type is not a problem, it displays a problem in the logic.
+ *
+ * The mask bit in IMR is cleared by dev_priv->rps.work.
+ */
+
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ dev_priv->rps.pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+ POSTING_READ(GEN6_PMIMR);
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+}
+
+#define HPD_STORM_DETECT_PERIOD 1000
+#define HPD_STORM_THRESHOLD 5
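+/* More than HPD_STORM_THRESHOLD interrupts from one pin within an
+ * HPD_STORM_DETECT_PERIOD-ms window is treated as an interrupt storm;
+ * the pin is then masked off and its connector reverts to polling.
+ */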
+
+static inline void intel_hpd_irq_handler(struct drm_device *dev,
+ u32 hotplug_trigger,
+ const u32 *hpd)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+ bool storm_detected = false;
+
+ if (!hotplug_trigger)
+ return;
+
+ spin_lock(&dev_priv->irq_lock);
+ for (i = 1; i < HPD_NUM_PINS; i++) {
+
+ if (!(hpd[i] & hotplug_trigger) ||
+ dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
+ continue;
+
+ dev_priv->hpd_event_bits |= (1 << i);
+ if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
+ dev_priv->hpd_stats[i].hpd_last_jiffies
+ + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
+ dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
+ dev_priv->hpd_stats[i].hpd_cnt = 0;
+ } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
+ dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
+ dev_priv->hpd_event_bits &= ~(1 << i);
+ DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
+ storm_detected = true;
+ } else {
+ dev_priv->hpd_stats[i].hpd_cnt++;
+ }
+ }
+
+ if (storm_detected)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock(&dev_priv->irq_lock);
+
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+}
+
+static void gmbus_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+static void dp_aux_irq_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+ * we must be able to deal with other PM interrupts. This is complicated because
+ * of the way in which we use the masks to defer the RPS work (which for
+ * posterity is necessary because of forcewake).
+ */
+static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pm_iir)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+ if (dev_priv->rps.pm_iir) {
+ I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+ /* never want to mask useful interrupts. (also posting read) */
+ WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+ /* TODO: if queue_work is slow, move it out of the spinlock */
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+ }
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+ if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+ DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+ i915_handle_error(dev_priv->dev, false);
+ }
+ }
+}
+
+static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
+{
+ /* LINTED */
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, gt_iir, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long irqflags;
+ int pipe;
+ u32 pipe_stats[I915_MAX_PIPES] = { 0 };
+
+ atomic_inc(&dev_priv->irq_received);
+
+ while (true) {
+ iir = I915_READ(VLV_IIR);
+ gt_iir = I915_READ(GTIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+ goto out;
+
+ ret = IRQ_HANDLED;
+
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+ }
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev);
+
+ if (pm_iir & GEN6_PM_RPS_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ I915_WRITE(VLV_IIR, iir);
+ }
+
+out:
+ return ret;
+}
+
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+ u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK) {
+ int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
+ SDE_AUDIO_POWER_SHIFT);
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ port_name(port));
+ }
+
+ if (pch_iir & SDE_AUX_MASK)
+ dp_aux_irq_handler(dev);
+
+ if (pch_iir & SDE_GMBUS)
+ gmbus_irq_handler(dev);
+
+ if (pch_iir & SDE_AUDIO_HDCP_MASK)
+ DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_TRANS_MASK)
+ DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+ if (pch_iir & SDE_POISON)
+ DRM_ERROR("PCH poison interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+ DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+ if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+ DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+ if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
+ false))
+ DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+
+ if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
+ false))
+ DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+}
+
+static void ivb_err_int_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 err_int = I915_READ(GEN7_ERR_INT);
+
+ if (err_int & ERR_INT_POISON)
+ DRM_ERROR("Poison interrupt\n");
+
+ if (err_int & ERR_INT_FIFO_UNDERRUN_A)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+ DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+ if (err_int & ERR_INT_FIFO_UNDERRUN_B)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+ DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
+ if (err_int & ERR_INT_FIFO_UNDERRUN_C)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
+ DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+
+ I915_WRITE(GEN7_ERR_INT, err_int);
+}
+
+static void cpt_serr_int_handler(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 serr_int = I915_READ(SERR_INT);
+
+ if (serr_int & SERR_INT_POISON)
+ DRM_ERROR("PCH poison interrupt\n");
+
+ if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
+ false))
+ DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+
+ if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
+ false))
+ DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+
+ if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
+ if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
+ false))
+ DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
+
+ I915_WRITE(SERR_INT, serr_int);
+}
+
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+ u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
+ int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+ SDE_AUDIO_POWER_SHIFT_CPT);
+ DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
+ port_name(port));
+ }
+
+ if (pch_iir & SDE_AUX_MASK_CPT)
+ dp_aux_irq_handler(dev);
+
+ if (pch_iir & SDE_GMBUS_CPT)
+ gmbus_irq_handler(dev);
+
+ if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+ DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+ DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK_CPT)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ if (pch_iir & SDE_ERROR_CPT)
+ cpt_serr_int_handler(dev);
+}
+
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+{
+ /* LINTED */
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
+ irqreturn_t ret = IRQ_NONE;
+ int i;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ /* We get interrupts on unclaimed registers, so check for this before we
+ * do any I915_{READ,WRITE}. */
+ if (IS_HASWELL(dev) &&
+ (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed register before interrupt\n");
+ I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+
+ /* Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+ if (!HAS_PCH_NOP(dev)) {
+ sde_ier = I915_READ(SDEIER);
+ I915_WRITE(SDEIER, 0);
+ POSTING_READ(SDEIER);
+ }
+
+ /* On Haswell, also mask ERR_INT because we don't want to risk
+ * generating "unclaimed register" interrupts from inside the interrupt
+ * handler. */
+ if (IS_HASWELL(dev)) {
+ spin_lock(&dev_priv->irq_lock);
+ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ spin_unlock(&dev_priv->irq_lock);
+ }
+
+ gt_iir = I915_READ(GTIIR);
+ if (gt_iir) {
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
+ }
+
+ de_iir = I915_READ(DEIIR);
+ if (de_iir) {
+ if (de_iir & DE_ERR_INT_IVB)
+ ivb_err_int_handler(dev);
+
+ if (de_iir & DE_AUX_CHANNEL_A_IVB)
+ dp_aux_irq_handler(dev);
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ }
+
+ /* check event from PCH */
+ if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ cpt_irq_handler(dev, pch_iir);
+
+ /* clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
+ }
+
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ if (IS_HASWELL(dev))
+ hsw_pm_irq_handler(dev_priv, pm_iir);
+ else if (pm_iir & GEN6_PM_RPS_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
+ }
+
+ if (IS_HASWELL(dev)) {
+ spin_lock(&dev_priv->irq_lock);
+ if (ivb_can_enable_err_int(dev))
+ ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ spin_unlock(&dev_priv->irq_lock);
+ }
+
+ I915_WRITE(DEIER, de_ier);
+ POSTING_READ(DEIER);
+ if (!HAS_PCH_NOP(dev)) {
+ I915_WRITE(SDEIER, sde_ier);
+ POSTING_READ(SDEIER);
+ }
+
+ return ret;
+}
+
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir &
+ (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *)(uintptr_t) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int ret = IRQ_NONE;
+ u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(DEIER);
+
+ /* Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+ sde_ier = I915_READ(SDEIER);
+ I915_WRITE(SDEIER, 0);
+ POSTING_READ(SDEIER);
+
+ de_iir = I915_READ(DEIIR);
+ gt_iir = I915_READ(GTIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
+ goto done;
+
+ ret = IRQ_HANDLED;
+
+ if (IS_GEN5(dev))
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+ if (de_iir & DE_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
+ if (de_iir & DE_POISON)
+ DRM_ERROR("Poison interrupt\n");
+
+ if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+ DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+ if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+ DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
+
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_handle_rps_change(dev);
+
+ if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(DEIIR, de_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+done:
+ I915_WRITE(DEIER, de_ier);
+ POSTING_READ(DEIER);
+ I915_WRITE(SDEIER, sde_ier);
+ POSTING_READ(SDEIER);
+
+ return ret;
+}
+
+/**
+ * i915_error_work_func - do process context error handling work
+ * @work: work struct
+ *
+ * Fire an error uevent so userspace can see that a hang or error
+ * was detected.
+ */
+static void i915_error_work_func(struct work_struct *work)
+{
+ struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+ work);
+ drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
+ gpu_error);
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_ring_buffer *ring;
+ /* LINTED */
+ char *error_event[] = { "ERROR=1", NULL };
+ /* LINTED */
+ char *reset_event[] = { "RESET=1", NULL };
+ /* LINTED */
+ char *reset_done_event[] = { "ERROR=0", NULL };
+ int i, ret;
+
+ DRM_DEBUG_DRIVER("generating error event\n");
+ /* OSOL_i915: kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
+
+ /*
+ * Note that there's only one work item which does gpu resets, so we
+ * need not worry about concurrent gpu resets potentially incrementing
+ * error->reset_counter twice. We only need to take care of another
+ * racing irq/hangcheck declaring the gpu dead for a second time. A
+ * quick check for that is good enough: schedule_work ensures the
+ * correct ordering between hang detection and this work item, and since
+ * the reset in-progress bit is only ever set by code outside of this
+ * work we don't need to worry about any other races.
+ */
+ if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+ DRM_DEBUG_DRIVER("resetting chip\n");
+ /* OSOL_i915: kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
+ ret = i915_reset(dev);
+
+ if (ret == 0) {
+ /*
+ * After all the gem state is reset, increment the reset
+ * counter and wake up everyone waiting for the reset to
+ * complete.
+ *
+ * Since unlock operations are a one-sided barrier only,
+ * we need to insert a barrier here to order any seqno
+ * updates before the counter increment.
+ */
+ atomic_inc(&dev_priv->gpu_error.reset_counter);
+ if (gpu_dump > 0) {
+ for_each_ring(ring, dev_priv, i)
+ ring_dump(dev, ring);
+ register_dump(dev);
+ gtt_dump(dev);
+ }
+ } else {
+ atomic_set(&error->reset_counter, I915_WEDGED);
+ }
+
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
+
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
+ DRM_INFO("resetting done\n");
+ }
+}
+
+/* NB: please notice the memset */
+static void i915_get_extra_instdone(struct drm_device *dev,
+ uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ (void) memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+ DRM_INFO("Unsupported platform\n");
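+ /* FALLTHROUGH */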
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct drm_i915_error_object *
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *src,
+ const int num_pages)
+{
+ struct drm_i915_error_object *dst;
+ int i;
+ u32 reloc_offset;
+
+ if (src == NULL || src->pages == NULL)
+ return NULL;
+
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ reloc_offset = src->gtt_offset;
+ for (i = 0; i < num_pages; i++) {
+ unsigned long flags;
+ void *d;
+
+ d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (d == NULL)
+ goto unwind;
+
+ local_irq_save(flags);
+ if (reloc_offset < dev_priv->gtt.mappable_end &&
+ src->has_global_gtt_mapping) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else if (src->stolen) {
+ unsigned long offset;
+
+ offset = dev_priv->mm.stolen_base;
+ offset += src->stolen->start;
+ offset += i << PAGE_SHIFT;
+
+ memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
+ } else {
+ struct page *page;
+ void *s;
+
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap_atomic(page);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&page, 1);
+ }
+ local_irq_restore(flags);
+
+ dst->pages[i] = d;
+
+ reloc_offset += PAGE_SIZE;
+ }
+ dst->page_count = num_pages;
+ dst->gtt_offset = src->gtt_offset;
+
+ return dst;
+
+unwind:
+ while (i--)
+ kfree(dst->pages[i]);
+ kfree(dst);
+ return NULL;
+}
+#define i915_error_object_create(dev_priv, src) \
+ i915_error_object_create_sized((dev_priv), (src), \
+ (src)->base.size>>PAGE_SHIFT)
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+void
+i915_error_state_free(struct kref *error_ref)
+{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ i915_error_object_free(error->ring[i].ctx);
+ kfree(error->ring[i].requests);
+ }
+
+ kfree(error->active_bo);
+ kfree(error->overlay);
+ kfree(error->display);
+ kfree(error);
+}
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
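+	/* pinned: 0 = unpinned, 1 = pinned by the kernel, -1 = pinned by userspace */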
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, mm_list) {
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, global_list) {
+ if (obj->pin_count == 0)
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
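+		/* FALLTHROUGH - gen3 also has the eight gen2 fence registers */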
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ u32 seqno;
+
+ if (!ring->get_seqno)
+ return NULL;
+
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ seqno = ring->get_seqno(ring, false);
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->ring != ring)
+ continue;
+
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
+ continue;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+ continue;
+
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ return NULL;
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ error->semaphore_mboxes[ring->id][0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ error->semaphore_mboxes[ring->id][1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS)
+ error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
+
+ error->cpu_ring_head[ring->id] = ring->head;
+ error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+ struct drm_i915_error_state *error,
+ struct drm_i915_error_ring *ering)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ /* Currently render ring is the only HW context user */
+ if (ring->id != RCS || !error->ccid)
+ return;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
+ ering->ctx = i915_error_object_create_sized(dev_priv,
+ obj, 1);
+ }
+ }
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for_each_ring(ring, dev_priv, i) {
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
+ i915_error_first_batchbuffer(dev_priv, ring);
+
+ error->ring[i].ringbuffer =
+ i915_error_object_create(dev_priv, ring->obj);
+
+
+ i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests =
+ kmalloc(count*sizeof(struct drm_i915_error_request),
+ GFP_ATOMIC);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+static void i915_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+ int i, pipe;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ if (error)
+ return;
+
+ /* Account for pipe specific data like PIPE*STAT */
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ if (!error) {
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
+ }
+
+ DRM_INFO("capturing error event; look for more information in "
+ "/sys/kernel/debug/dri/%d/i915_error_state\n",
+ dev->primary->index);
+
+ kref_init(&error->ref);
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
+ if (!HAS_PCH_SPLIT(dev))
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
+
+ /* Record buffers on the active and pinned lists. */
+ error->active_bo = NULL;
+ error->pinned_bo = NULL;
+
+ i = 0;
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+ i++;
+ error->active_bo_count = i;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ if (obj->pin_count)
+ i++;
+ error->pinned_bo_count = i - error->active_bo_count;
+
+ error->active_bo = NULL;
+ error->pinned_bo = NULL;
+ if (i) {
+ error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
+ GFP_ATOMIC);
+ if (error->active_bo)
+ error->pinned_bo =
+ error->active_bo + error->active_bo_count;
+ }
+
+ if (error->active_bo)
+ error->active_bo_count =
+ capture_active_bo(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
+
+ if (error->pinned_bo)
+ error->pinned_bo_count =
+ capture_pinned_bo(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.bound_list);
+
+ do_gettimeofday(&error->time);
+
+ error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (dev_priv->gpu_error.first_error == NULL) {
+ dev_priv->gpu_error.first_error = error;
+ error = NULL;
+ }
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ i915_error_state_free(&error->ref);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ dev_priv->gpu_error.first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ kref_put(&error->ref, i915_error_state_free);
+}
+#else
+#define i915_capture_error_state(x)
+#endif
+
+static void i915_report_and_clear_eir(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t instdone[I915_NUM_INSTDONE_REG];
+ u32 eir = I915_READ(EIR);
+ int pipe;
+
+ if (!eir)
+ return;
+
+ DRM_ERROR("render error detected, EIR: 0x%08x\n", eir);
+
+ i915_get_extra_instdone(dev, instdone);
+
+ if (IS_G4X(dev)) {
+ if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ DRM_DEBUG(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR_I965));
+ DRM_DEBUG(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR_I965));
+ DRM_DEBUG(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE_I965));
+ DRM_DEBUG(" INSTPS: 0x%08x\n",
+ I915_READ(INSTPS));
+ DRM_DEBUG(" INSTDONE1: 0x%08x\n",
+ I915_READ(INSTDONE1));
+ DRM_DEBUG(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ POSTING_READ(IPEIR_I965);
+ }
+ if (eir & GM45_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ DRM_DEBUG("page table error\n");
+ DRM_DEBUG(" PGTBL_ER: 0x%08x\n",
+ pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ POSTING_READ(PGTBL_ER);
+ }
+ }
+
+ if (!IS_GEN2(dev)) {
+ if (eir & I915_ERROR_PAGE_TABLE) {
+ u32 pgtbl_err = I915_READ(PGTBL_ER);
+ DRM_DEBUG("page table error\n");
+ DRM_DEBUG(" PGTBL_ER: 0x%08x\n",
+ pgtbl_err);
+ I915_WRITE(PGTBL_ER, pgtbl_err);
+ POSTING_READ(PGTBL_ER);
+ }
+ }
+
+ if (eir & I915_ERROR_MEMORY_REFRESH) {
+ DRM_DEBUG("memory refresh error:\n");
+ for_each_pipe(pipe)
+ DRM_DEBUG("pipe %c stat: 0x%08x\n",
+ pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
+ /* pipestat has already been acked */
+ }
+ if (eir & I915_ERROR_INSTRUCTION) {
+ DRM_DEBUG("instruction error\n");
+ DRM_DEBUG(" INSTPM: 0x%08x\n",
+ I915_READ(INSTPM));
+ if (INTEL_INFO(dev)->gen < 4) {
+ u32 ipeir = I915_READ(IPEIR);
+
+ DRM_DEBUG(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR));
+ DRM_DEBUG(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR));
+ DRM_DEBUG(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE));
+ DRM_DEBUG(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD));
+ I915_WRITE(IPEIR, ipeir);
+ POSTING_READ(IPEIR);
+ } else {
+ u32 ipeir = I915_READ(IPEIR_I965);
+
+ DRM_DEBUG(" IPEIR: 0x%08x\n",
+ I915_READ(IPEIR_I965));
+ DRM_DEBUG(" IPEHR: 0x%08x\n",
+ I915_READ(IPEHR_I965));
+ DRM_DEBUG(" INSTDONE: 0x%08x\n",
+ I915_READ(INSTDONE_I965));
+ DRM_DEBUG(" INSTPS: 0x%08x\n",
+ I915_READ(INSTPS));
+ DRM_DEBUG(" INSTDONE1: 0x%08x\n",
+ I915_READ(INSTDONE1));
+ DRM_DEBUG(" ACTHD: 0x%08x\n",
+ I915_READ(ACTHD_I965));
+ I915_WRITE(IPEIR_I965, ipeir);
+ POSTING_READ(IPEIR_I965);
+ }
+ }
+
+ I915_WRITE(EIR, eir);
+ POSTING_READ(EIR);
+ eir = I915_READ(EIR);
+ if (eir) {
+ /*
+		 * Some errors might have become stuck;
+		 * mask them.
+ */
+ DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+ I915_WRITE(EMR, I915_READ(EMR) | eir);
+ I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ }
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_device *dev, bool wedged)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ i915_capture_error_state(dev);
+ i915_report_and_clear_eir(dev);
+
+ if (wedged) {
+ atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+ &dev_priv->gpu_error.reset_counter);
+
+ /*
+ * Wakeup waiting processes so they don't hang
+ */
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
+ }
+
+ (void) queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+}
+
+static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ bool stall_detected;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+
+ if (work == NULL ||
+ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+ !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
+ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+ obj = work->pending_flip_obj;
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int dspsurf = DSPSURF(intel_crtc->plane);
+ stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+ obj->gtt_offset;
+ } else {
+ int dspaddr = DSPADDR(intel_crtc->plane);
+ stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+ crtc->y * crtc->fb->pitches[0] +
+ crtc->x * crtc->fb->bits_per_pixel/8);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (stall_detected) {
+ DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+ intel_prepare_page_flip(dev, intel_crtc->plane);
+ }
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (INTEL_INFO(dev)->gen >= 4)
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ else
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ /* maintain vblank delivery even in deep C-states */
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 imr;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0)
+ imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ else
+ imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ I915_WRITE(VLV_IMR, imr);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
+
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_disable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (pipe * 5));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 imr;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0)
+ imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ else
+ imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ I915_WRITE(VLV_IMR, imr);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *last_req;
+ last_req = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list);
+ return (last_req->seqno);
+}
+
+static bool
+ring_idle(struct intel_ring_buffer *ring, u32 seqno)
+{
+ return (list_empty(&ring->request_list) ||
+ i915_seqno_passed(seqno, ring_last_seqno(ring)));
+}
+
+static struct intel_ring_buffer *
+semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 cmd, ipehr, acthd, acthd_min;
+ u32 *tmp;
+
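+	/*
+	 * A ring blocked on a semaphore executes MI_SEMAPHORE_MBOX with the
+	 * COMPARE and REGISTER flags set; bits 17:16 are the register
+	 * select, so mask them off before comparing. Anything else in IPEHR
+	 * means we are not waiting on another ring.
+	 */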
+ ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+ if ((ipehr & ~(0x3 << 16)) !=
+ (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
+ return NULL;
+
+ /* ACTHD is likely pointing to the dword after the actual command,
+ * so scan backwards until we find the MBOX.
+ */
+ acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
+ acthd_min = max((int)acthd - 3 * 4, 0);
+ do {
+ tmp = (u32 *)((intptr_t)ring->virtual_start + acthd);
+ cmd = *tmp;
+ if (cmd == ipehr)
+ break;
+
+ acthd -= 4;
+ if (acthd < acthd_min)
+ return NULL;
+ } while (1);
+
+	tmp = (u32 *)((intptr_t)ring->virtual_start + acthd + 4);
+	*seqno = *tmp + 1;
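+	/* The register-select bit of the MBOX command identifies which of
+	 * the other two rings is the signaller. */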
+ return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
+}
+
+static int semaphore_passed(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_ring_buffer *signaller;
+ u32 seqno, ctl;
+
+ ring->hangcheck.deadlock = true;
+
+ signaller = semaphore_waits_for(ring, &seqno);
+ if (signaller == NULL || signaller->hangcheck.deadlock)
+ return -1;
+
+ /* cursory check for an unkickable deadlock */
+ ctl = I915_READ_CTL(signaller);
+ if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
+ return -1;
+
+ return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
+}
+
+static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
+{
+ struct intel_ring_buffer *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ ring->hangcheck.deadlock = false;
+}
+
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ if (ring->hangcheck.acthd != acthd)
+ return active;
+
+ if (IS_GEN2(dev))
+ return hung;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT) {
+ DRM_ERROR("Kicking stuck wait on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return kick;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ switch (semaphore_passed(ring)) {
+ default:
+ return hung;
+ case 1:
+ DRM_ERROR("Kicking stuck semaphore on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return kick;
+ case 0:
+ return wait;
+ }
+ }
+
+ return hung;
+}
+
+/**
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. We track each ring's seqno and ACTHD;
+ * if neither has advanced by the time the hangcheck timer elapses, the
+ * ring's hangcheck score is increased. ACTHD is also inspected to see
+ * whether the ring is merely stuck on a wait, in which case we kick the
+ * ring. If we continue to see no progress on subsequent checks, we
+ * assume the chip is wedged and try to fix it.
+ */
+void i915_hangcheck_elapsed(void* data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
+ int busy_count = 0, rings_hung = 0;
+ bool stuck[I915_NUM_RINGS] = { 0 };
+#define BUSY 1
+#define KICK 5
+#define HUNG 20
+#define FIRE 30
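+/*
+ * Per-sample score increments: BUSY while a ring is still processing a
+ * request, KICK after we had to kick a stuck wait or semaphore, HUNG when
+ * no forward progress is seen at all; once a ring's score exceeds FIRE
+ * we declare it hung and trigger error handling.
+ */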
+
+ if (!i915_enable_hangcheck)
+ return;
+
+ for_each_ring(ring, dev_priv, i) {
+ u32 seqno, acthd;
+ bool busy = true;
+
+ semaphore_clear_deadlocks(dev_priv);
+
+ seqno = ring->get_seqno(ring, false);
+ acthd = intel_ring_get_active_head(ring);
+
+ if (ring->hangcheck.seqno == seqno) {
+ if (ring_idle(ring, seqno)) {
+ if (mutex_is_locked(&ring->irq_queue.lock)) {
+ /* Issue a wake-up to catch stuck h/w. */
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
+ wake_up_all(&ring->irq_queue);
+ ring->hangcheck.score += HUNG;
+ } else
+ busy = false;
+ } else {
+ int score = 0;
+
+ /* We always increment the hangcheck score
+ * if the ring is busy and still processing
+ * the same request, so that no single request
+ * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when the
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
+ * victim and we want to be sure we catch the
+ * right culprit. Then every time we do kick
+ * the ring, add a small increment to the
+ * score so that we can catch a batch that is
+ * being repeatedly kicked and so responsible
+ * for stalling the machine.
+ */
+ ring->hangcheck.action = ring_stuck(ring,
+ acthd);
+
+ switch (ring->hangcheck.action) {
+ case wait:
+ score = 0;
+ break;
+ case active:
+ score = BUSY;
+ break;
+ case kick:
+ score = KICK;
+ break;
+ case hung:
+ score = HUNG;
+ stuck[i] = true;
+ break;
+ }
+ ring->hangcheck.score += score;
+ }
+ } else {
+ /* Gradually reduce the count so that we catch DoS
+ * attempts across multiple batches.
+ */
+ if (ring->hangcheck.score > 0)
+ ring->hangcheck.score--;
+ }
+
+ ring->hangcheck.seqno = seqno;
+ ring->hangcheck.acthd = acthd;
+ busy_count += busy;
+ }
+
+ for_each_ring(ring, dev_priv, i) {
+ if (ring->hangcheck.score > FIRE) {
+ DRM_ERROR("%s on %s\n",
+ stuck[i] ? "stuck" : "no progress",
+ ring->name);
+ rings_hung++;
+ }
+ }
+
+ if (rings_hung) {
+ i915_handle_error(dev, true);
+ return;
+ }
+
+ if (busy_count)
+		/* Reset the timer in case the chip hangs without another
+		 * request being added */
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+}
+
+static void ibx_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_PCH_NOP(dev))
+ return;
+
+ /* south display irq */
+ I915_WRITE(SDEIMR, 0xffffffff);
+ /*
+ * SDEIER is also touched by the interrupt handler to work around missed
+ * PCH interrupts. Hence we can't update it after the interrupt handler
+ * is enabled - instead we unconditionally enable all PCH interrupt
+ * sources here, but then only unmask them as needed with SDEIMR.
+ */
+ I915_WRITE(SDEIER, 0xffffffff);
+ POSTING_READ(SDEIER);
+}
+
+/* drm_dma.h hooks */
+static void ironlake_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ I915_WRITE(HWSTAM, 0xeffe);
+
+ /* XXX hotplug from PCH */
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ POSTING_READ(DEIER);
+
+ /* and GT */
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ ibx_irq_preinstall(dev);
+}
+
+static void ivybridge_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ I915_WRITE(HWSTAM, 0xeffe);
+
+ /* XXX hotplug from PCH */
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ POSTING_READ(DEIER);
+
+ /* and GT */
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ /* Power management */
+ I915_WRITE(GEN6_PMIMR, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0x0);
+ POSTING_READ(GEN6_PMIER);
+
+ ibx_irq_preinstall(dev);
+}
+
+static void valleyview_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ /* VLV magic */
+ I915_WRITE(VLV_IMR, 0);
+ I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+ I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+ I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+ /* and GT */
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ I915_WRITE(DPINVGTT, 0xff);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
+static void ibx_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *intel_encoder;
+ u32 mask = ~I915_READ(SDEIMR);
+ u32 hotplug;
+
+ if (HAS_PCH_IBX(dev)) {
+ mask &= ~SDE_HOTPLUG_MASK;
+ list_for_each_entry(intel_encoder, struct intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ mask |= hpd_ibx[intel_encoder->hpd_pin];
+ } else {
+ mask &= ~SDE_HOTPLUG_MASK_CPT;
+ list_for_each_entry(intel_encoder, struct intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ mask |= hpd_cpt[intel_encoder->hpd_pin];
+ }
+
+ I915_WRITE(SDEIMR, ~mask);
+
+ /*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ *
+ * This register is the same on all known PCH chips.
+ */
+ hotplug = I915_READ(PCH_PORT_HOTPLUG);
+ hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+ hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+ hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+ hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+ I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
+static void ibx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 mask;
+
+ if (HAS_PCH_NOP(dev))
+ return;
+
+ if (HAS_PCH_IBX(dev)) {
+ mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
+ SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+ } else {
+ mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+
+ I915_WRITE(SERR_INT, I915_READ(SERR_INT));
+ }
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, ~mask);
+}
+
+static int ironlake_irq_postinstall(struct drm_device *dev)
+{
+ unsigned long irqflags;
+
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	/* enable the kinds of interrupts that are always enabled */
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+ DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+ DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
+ u32 gt_irqs;
+
+ dev_priv->irq_mask = ~display_mask;
+
+	/* these should always be able to generate an irq */
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER, display_mask |
+ DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
+
+ /* LINTED */
+ dev_priv->gt_irq_mask = ~0;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ gt_irqs = GT_RENDER_USER_INTERRUPT;
+
+ if (IS_GEN6(dev))
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+ else
+ gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
+ ILK_BSD_USER_INTERRUPT;
+
+ I915_WRITE(GTIER, gt_irqs);
+
+ ibx_irq_postinstall(dev);
+ POSTING_READ(GTIER);
+ POSTING_READ(DEIER);
+
+ if (IS_IRONLAKE_M(dev)) {
+ /* Enable PCU event interrupts
+ *
+ * spinlocking not required here for correctness since interrupt
+ * setup is guaranteed to run in single-threaded context. But we
+ * need it to make the assert_spin_locked happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ return 0;
+}
+
+static int ivybridge_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	/* enable the kinds of interrupts that are always enabled */
+ u32 display_mask =
+ DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+ DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB |
+ DE_AUX_CHANNEL_A_IVB |
+ DE_ERR_INT_IVB;
+ u32 pm_irqs = GEN6_PM_RPS_EVENTS;
+ u32 gt_irqs;
+
+ dev_priv->irq_mask = ~display_mask;
+
+	/* these should always be able to generate an irq */
+ I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER,
+ display_mask |
+ DE_PIPEC_VBLANK_IVB |
+ DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
+ POSTING_READ(DEIER);
+
+ /* LINTED */
+ dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIER, gt_irqs);
+ POSTING_READ(GTIER);
+
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+ if (HAS_VEBOX(dev))
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT |
+ PM_VEBOX_CS_ERROR_INTERRUPT;
+
+ /* Our enable/disable rps functions may touch these registers so
+ * make sure to set a known state for only the non-RPS bits.
+ * The RMW is extra paranoia since this should be called after being set
+ * to a known state in preinstall.
+	 */
+ I915_WRITE(GEN6_PMIMR,
+ (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
+ I915_WRITE(GEN6_PMIER,
+ (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
+ POSTING_READ(GEN6_PMIER);
+
+ ibx_irq_postinstall(dev);
+
+ return 0;
+}
+
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 gt_irqs;
+ u32 enable_mask;
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+
+ enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ /*
+	 * Leave vblank interrupts masked initially; enable/disable will
+	 * toggle them based on usage.
+ */
+ dev_priv->irq_mask = (~enable_mask) |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(PIPESTAT(0), 0xffff);
+ I915_WRITE(PIPESTAT(1), 0xffff);
+ POSTING_READ(VLV_IER);
+
+ i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+ i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+ i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+ gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ I915_WRITE(GTIER, gt_irqs);
+ POSTING_READ(GTIER);
+
+ /* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+ if (IS_GEN7(dev))
+ I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+ if (HAS_PCH_NOP(dev))
+ return;
+
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+ I915_WRITE(SERR_INT, I915_READ(SERR_INT));
+}
+
+static void i8xx_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ POSTING_READ16(IER);
+}
+
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ I915_WRITE16(EMR,
+ ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ /* LINTED */
+ dev_priv->irq_mask =
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+ return 0;
+}
+
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i8xx_handle_vblank(struct drm_device *dev,
+ int pipe, u16 iir)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+ if (!drm_handle_vblank(dev, pipe))
+ return false;
+
+ if ((iir & flip_pending) == 0)
+ return false;
+
+ intel_prepare_page_flip(dev, pipe);
+
+ /* We detect FlipDone by looking for the change in PendingFlip from '1'
+ * to '0' on the following vblank, i.e. IIR has the Pendingflip
+ * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+ * the flip is completed (no longer pending). Since this doesn't raise
+ * an interrupt per se, we watch for the change at vblank.
+ */
+ if (I915_READ16(ISR) & flip_pending)
+ return false;
+
+ intel_finish_page_flip(dev, pipe);
+
+ return true;
+}
+
+static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+{
+ /* LINTED */
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u16 iir, new_iir;
+ u32 pipe_stats[2] = { 0 };
+ unsigned long irqflags;
+ int pipe;
+ u16 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ return IRQ_NONE;
+
+ while (iir & ~flip_mask) {
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ I915_WRITE16(IIR, iir & ~flip_mask);
+ new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+ i915_update_dri1_breadcrumb(dev);
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ i8xx_handle_vblank(dev, 0, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
+
+ if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ i8xx_handle_vblank(dev, 1, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
+
+ iir = new_iir;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void i8xx_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+static void i915_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask;
+
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ /* LINTED */
+ dev_priv->irq_mask =
+ ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ /* Enable in IER... */
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+ /* and unmask in IMR */
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ }
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+
+ return 0;
+}
+
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i915_handle_vblank(struct drm_device *dev,
+ int plane, int pipe, u32 iir)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
+
+ if (!drm_handle_vblank(dev, pipe))
+ return false;
+
+ if ((iir & flip_pending) == 0)
+ return false;
+
+ intel_prepare_page_flip(dev, plane);
+
+ /* We detect FlipDone by looking for the change in PendingFlip from '1'
+ * to '0' on the following vblank, i.e. IIR has the Pendingflip
+ * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+ * the flip is completed (no longer pending). Since this doesn't raise
+ * an interrupt per se, we watch for the change at vblank.
+ */
+ if (I915_READ(ISR) & flip_pending)
+ return false;
+
+ intel_finish_page_flip(dev, pipe);
+
+ return true;
+}
+
+static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+{
+ /* LINTED */
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir, pipe_stats[I915_MAX_PIPES] = { 0 };
+ unsigned long irqflags;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ int pipe, ret = IRQ_NONE;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+ do {
+ bool irq_received = (iir & ~flip_mask) != 0;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /* Clear the PIPE*STAT regs before the IIR */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ POSTING_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ for_each_pipe(pipe) {
+ int plane = pipe;
+ if (IS_MOBILE(dev))
+ plane = !plane;
+
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ i915_handle_vblank(dev, plane, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
+
+ }
+
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ ret = IRQ_HANDLED;
+ iir = new_iir;
+ } while (iir & ~flip_mask);
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
+static void i915_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xffff);
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ I915_WRITE(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+static int i965_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask;
+ u32 error_mask;
+
+ /* Unmask the interrupts that we always want on. */
+ /* LINTED */
+ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask = ~dev_priv->irq_mask;
+ enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+ enable_mask |= I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
+
+ i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+
+ /*
+ * Enable some error detection, note the instruction error mask
+ * bit is reserved, so we leave it masked.
+ */
+ if (IS_G4X(dev)) {
+ /* LINTED */
+ error_mask = ~(GM45_ERROR_PAGE_TABLE |
+ GM45_ERROR_MEM_PRIV |
+ GM45_ERROR_CP_PRIV |
+ I915_ERROR_MEMORY_REFRESH);
+ } else {
+ /* LINTED */
+ error_mask = ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH);
+ }
+ I915_WRITE(EMR, error_mask);
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+
+ return 0;
+}
+
+static void i915_hpd_irq_setup(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *intel_encoder;
+ u32 hotplug_en;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~HOTPLUG_INT_EN_MASK;
+ /* Note HDMI and DP share hotplug bits */
+ /* enable bits are the same for all generations */
+ list_for_each_entry(intel_encoder, struct intel_encoder, &mode_config->encoder_list, base.head)
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+ hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
+		/* Programming the CRT detection parameters tends
+		 * to generate a spurious hotplug event about three
+		 * seconds later. So just do it once.
+		 */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+ /* Ignore TV since it's buggy */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+}
+
+static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
+{
+ /* LINTED */
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir;
+ u32 pipe_stats[I915_MAX_PIPES] = { 0 };
+ unsigned long irqflags;
+ int irq_received;
+ int ret = IRQ_NONE, pipe;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+
+ for (;;) {
+
+ irq_received = (iir & ~flip_mask) != 0;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
+ HOTPLUG_INT_STATUS_G4X :
+ HOTPLUG_INT_STATUS_I915);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+
+ intel_hpd_irq_handler(dev, hotplug_trigger,
+ IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+ i915_handle_vblank(dev, pipe, pipe, iir))
+ flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+ }
+
+
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
+ }
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
+static void i965_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe),
+ I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i915_reenable_hotplug_timer_func(void* data)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ unsigned long irqflags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
+ struct drm_connector *connector;
+
+ if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
+ continue;
+
+ dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+
+ list_for_each_entry(connector, struct drm_connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (intel_connector->encoder->hpd_pin == i) {
+ if (connector->polled != intel_connector->polled)
+ DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+ drm_get_connector_name(connector));
+ connector->polled = intel_connector->polled;
+ if (!connector->polled)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+ }
+ }
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+void intel_irq_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
+ INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+
+ init_timer(&dev_priv->gpu_error.hangcheck_timer);
+ setup_timer(&dev_priv->gpu_error.hangcheck_timer,
+ i915_hangcheck_elapsed,
+ (void *) dev);
+ init_timer(&dev_priv->hotplug_reenable_timer);
+ setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
+ (void *) dev_priv);
+
+
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ else
+ dev->driver->get_vblank_timestamp = NULL;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev->driver->irq_handler = valleyview_irq_handler;
+ dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_postinstall = valleyview_irq_postinstall;
+ dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ /* Share pre & uninstall handlers with ILK/SNB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ivybridge_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
+ dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev->driver->irq_handler = ironlake_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ironlake_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ironlake_enable_vblank;
+ dev->driver->disable_vblank = ironlake_disable_vblank;
+ dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+ } else {
+ if (INTEL_INFO(dev)->gen == 2) {
+ dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_postinstall = i8xx_irq_postinstall;
+ dev->driver->irq_handler = i8xx_irq_handler;
+ dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ } else if (INTEL_INFO(dev)->gen == 3) {
+ dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_handler = i915_irq_handler;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else {
+ dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_postinstall = i965_irq_postinstall;
+ dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_handler = i965_irq_handler;
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ }
+ dev->driver->enable_vblank = i915_enable_vblank;
+ dev->driver->disable_vblank = i915_disable_vblank;
+ }
+}
+
+void intel_hpd_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ unsigned long irqflags;
+ int i;
+
+ for (i = 1; i < HPD_NUM_PINS; i++) {
+ dev_priv->hpd_stats[i].hpd_cnt = 0;
+ dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+ }
+ list_for_each_entry(connector, struct drm_connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ connector->polled = intel_connector->polled;
+ if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked checks happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
diff --git a/usr/src/uts/intel/io/i915/i915_reg.h b/usr/src/uts/intel/io/i915/i915_reg.h
new file mode 100644
index 0000000..b04e4ac
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_reg.h
@@ -0,0 +1,5371 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013, Intel Corporation. All rights reserved.
+ */
+
+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _I915_REG_H_
+#define _I915_REG_H_
+
+#define GEN6_CONF_GMADR 0x18
+#define GEN6_GTT_BASE_MASK 0xfffff000
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
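+/*
+ * Illustrative expansion (not part of the original header): with pipe A == 0
+ * and pipe B == 1, _PIPE() selects between a register pair, e.g.
+ * _PIPE(0, _DPLL_A, _DPLL_B) == _DPLL_A and
+ * _PIPE(1, _DPLL_A, _DPLL_B) == _DPLL_B, since (a) + 1 * ((b) - (a)) == (b).
+ */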
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
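+/*
+ * Worked example (illustrative): masked-write registers carry a write-enable
+ * mask in their top 16 bits, so _MASKED_BIT_ENABLE(1 << 2) == 0x00040004
+ * sets bit 2 and _MASKED_BIT_DISABLE(1 << 2) == 0x00040000 clears it;
+ * bits whose enable is not set are left unchanged by the write.
+ */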
+
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2 0x20c4
+#define I965_PGETBL_SIZE_MASK 0x0000000e
+#define I965_PGETBL_SIZE_512KB (0 << 1)
+#define I965_PGETBL_SIZE_256KB (1 << 1)
+#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB (3 << 1)
+#define I965_PGETBL_SIZE_2MB (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
+#define G33_GMCH_SIZE_MASK (3 << 8)
+#define G33_GMCH_SIZE_1M (1 << 8)
+#define G33_GMCH_SIZE_2M (2 << 8)
+#define G4x_GMCH_SIZE_MASK (0xf << 8)
+#define G4x_GMCH_SIZE_1M (0x1 << 8)
+#define G4x_GMCH_SIZE_2M (0x3 << 8)
+#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
+#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
+#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the VGA arbiter.
+ */
+#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define INTEL_GMCH_ENABLED 0x4
+#define INTEL_GMCH_MEM_MASK 0x1
+#define INTEL_GMCH_MEM_64M 0x1
+#define INTEL_GMCH_MEM_128M 0
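+/*
+ * Sketch (illustrative; gmch_ctrl is a hypothetical value read from PCI
+ * config space):
+ * ggms = (gmch_ctrl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
+ * gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
+ */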
+
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+#define I830_RDRAM_CHANNEL_TYPE 0x03010
+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
+
+#define INTEL_GMCH_GMS_MASK (0xf << 4)
+#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+
+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+#define SNB_GTT_SIZE_0M (0 << 8)
+#define SNB_GTT_SIZE_1M (1 << 8)
+#define SNB_GTT_SIZE_2M (2 << 8)
+#define SNB_GTT_SIZE_MASK (3 << 8)
+
+/* PCI config space */
+
+#define HPLLCC 0xc0 /* 855 only */
+#define GC_CLOCK_CONTROL_MASK (0xf << 0)
+#define GC_CLOCK_133_200 (0 << 0)
+#define GC_CLOCK_100_200 (1 << 0)
+#define GC_CLOCK_100_133 (2 << 0)
+#define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC2 0xda
+#define GCFGC 0xf0 /* 915+ only */
+#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
+#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_MASK (7 << 4)
+#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
+#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
+#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0)
+#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0)
+#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0)
+#define I965_GC_RENDER_CLOCK_MASK (0xf << 0)
+#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0)
+#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0)
+#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0)
+#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0)
+#define I945_GC_RENDER_CLOCK_MASK (7 << 0)
+#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0)
+#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0)
+#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0)
+#define I915_GC_RENDER_CLOCK_MASK (7 << 0)
+#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
+#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
+#define LBB 0xf4
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define GRDOM_FULL (0<<2)
+#define GRDOM_RENDER (1<<2)
+#define GRDOM_MEDIA (3<<2)
+#define GRDOM_MASK (3<<2)
+#define GRDOM_RESET_ENABLE (1<<0)
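+/*
+ * Illustrative use (values hypothetical): a gen4 domain reset writes e.g.
+ * GRDOM_RENDER | GRDOM_RESET_ENABLE to the I965_GDRST config register and
+ * then polls it until GRDOM_RESET_ENABLE reads back as clear.
+ */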
+
+#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3<<21)
+#define GEN6_MBC_SNPCR_MAX (0<<21)
+#define GEN6_MBC_SNPCR_MED (1<<21)
+#define GEN6_MBC_SNPCR_LOW (2<<21)
+#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+
+#define GEN6_MBCTL 0x0907c
+#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
+#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
+
+#define GEN6_GDRST 0x941c
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+
+#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
+#define PP_DIR_DCLV_2G 0xffffffff
+
+#define GAM_ECOCHK 0x4090
+#define ECOCHK_SNB_BIT (1<<10)
+#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
+#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
+#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4)
+#define ECOCHK_PPGTT_LLC_IVB (0x1<<3)
+#define ECOCHK_PPGTT_UC_HSW (0x1<<3)
+#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
+#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+
+#define GAC_ECO_BITS 0x14090
+#define ECOBITS_SNB_BIT (1<<13)
+#define ECOBITS_PPGTT_CACHE64B (3<<8)
+#define ECOBITS_PPGTT_CACHE4B (0<<8)
+
+#define GAB_CTL 0x24000
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define VGA_MSR_MEM_EN (1<<1)
+#define VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define SR01 1
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define VGA_GR_MEM_READ_MODE_SHIFT 3
+#define VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define VGA_GR_MEM_MODE_MASK 0xc
+#define VGA_GR_MEM_MODE_SHIFT 2
+#define VGA_GR_MEM_A0000_AFFFF 0
+#define VGA_GR_MEM_A0000_BFFFF 1
+#define VGA_GR_MEM_B0000_B7FFF 2
+#define VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+
+#define MI_NOOP MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH MI_INSTR(0x04, 0)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
+#define MI_SUSPEND_FLUSH_EN (1<<0)
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
+#define MI_OVERLAY_CONTINUE (0x0<<21)
+#define MI_OVERLAY_ON (0x1<<21)
+#define MI_OVERLAY_OFF (0x2<<21)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
+#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
+
+#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
+#define MI_MM_SPACE_GTT (1<<8)
+#define MI_MM_SPACE_PHYSICAL (0<<8)
+#define MI_SAVE_EXT_STATE_EN (1<<3)
+#define MI_RESTORE_EXT_STATE_EN (1<<2)
+#define MI_FORCE_RESTORE (1<<1)
+#define MI_RESTORE_INHIBIT (1<<0)
+#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
+#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
+/* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ * simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many arbitrary registers: simply issue
+ * x address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
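+/*
+ * Illustrative emission for x == 1, per the note above (offset/value
+ * hypothetical):
+ * MI_NOOP, MI_LOAD_REGISTER_IMM(1), <register offset>, <value>
+ */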
+#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
+#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
+#define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
+#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
+#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */
+#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */
+#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */
+#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */
+#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */
+#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */
+#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */
+#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */
+#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */
+#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */
+#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */
+#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */
+#define MI_SEMAPHORE_SYNC_INVALID (3<<16)
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define BLT_DEPTH_8 (0<<24)
+#define BLT_DEPTH_16_565 (1<<24)
+#define BLT_DEPTH_16_1555 (2<<24)
+#define BLT_DEPTH_32 (3<<24)
+#define BLT_ROP_GXCOPY (0xcc<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
+#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
+#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
+#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
+#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
+#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
+#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
+#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830 0x6070
+#define DEBUG_RESET_FULL (1<<7)
+#define DEBUG_RESET_RENDER (1<<8)
+#define DEBUG_RESET_DISPLAY (1<<9)
+
+/*
+ * IOSF sideband
+ */
+#define VLV_IOSF_DOORBELL_REQ (VLV_DISPLAY_BASE + 0x2100)
+#define IOSF_DEVFN_SHIFT 24
+#define IOSF_OPCODE_SHIFT 16
+#define IOSF_PORT_SHIFT 8
+#define IOSF_BYTE_ENABLES_SHIFT 4
+#define IOSF_BAR_SHIFT 1
+#define IOSF_SB_BUSY (1<<0)
+#define IOSF_PORT_PUNIT 0x4
+#define IOSF_PORT_NC 0x11
+#define IOSF_PORT_DPIO 0x12
+#define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104)
+#define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108)
+
+#define PUNIT_OPCODE_REG_READ 6
+#define PUNIT_OPCODE_REG_WRITE 7
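+/*
+ * Sketch of the usual sideband sequence (illustrative): write the register
+ * address to VLV_IOSF_ADDR, post a request of the form
+ * (DPIO_DEVFN << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+ * (port << IOSF_PORT_SHIFT) | (0xf << IOSF_BYTE_ENABLES_SHIFT)
+ * to VLV_IOSF_DOORBELL_REQ, then poll IOSF_SB_BUSY there until it clears;
+ * data is transferred through VLV_IOSF_DATA.
+ */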
+
+#define PUNIT_REG_GPU_LFM 0xd3
+#define PUNIT_REG_GPU_FREQ_REQ 0xd4
+#define PUNIT_REG_GPU_FREQ_STS 0xd8
+#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
+
+#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
+#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+
+#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
+#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
+#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
+#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11
+#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800
+#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34
+#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007
+#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30
+#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
+#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+
+/*
+ * DPIO - a special bus for various display related registers to hide behind
+ *
+ * DPIO is VLV only.
+ *
+ * Note: digital port B is DDI0, digital port C is DDI1
+ */
+#define DPIO_DEVFN 0
+#define DPIO_OPCODE_REG_WRITE 1
+#define DPIO_OPCODE_REG_READ 0
+
+#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
+#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1<<1)
+#define DPIO_RESET (1<<0)
+
+#define _DPIO_TX3_SWING_CTL4_A 0x690
+#define _DPIO_TX3_SWING_CTL4_B 0x2a90
+#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
+ _DPIO_TX3_SWING_CTL4_B)
+
+/*
+ * Per pipe/PLL DPIO regs
+ */
+#define _DPIO_DIV_A 0x800c
+#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
+#define DPIO_POST_DIV_DAC 0
+#define DPIO_POST_DIV_HDMIDP 1 /* DAC 225-400M rate */
+#define DPIO_POST_DIV_LVDS1 2
+#define DPIO_POST_DIV_LVDS2 3
+#define DPIO_K_SHIFT (24) /* 4 bits */
+#define DPIO_P1_SHIFT (21) /* 3 bits */
+#define DPIO_P2_SHIFT (16) /* 5 bits */
+#define DPIO_N_SHIFT (12) /* 4 bits */
+#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
+#define DPIO_M2DIV_MASK 0xff
+#define _DPIO_DIV_B 0x802c
+#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+
+#define _DPIO_REFSFR_A 0x8014
+#define DPIO_REFSEL_OVERRIDE 27
+#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
+#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_PLL_REFCLK_SEL_MASK 3
+#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
+#define _DPIO_REFSFR_B 0x8034
+#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+
+#define _DPIO_CORE_CLK_A 0x801c
+#define _DPIO_CORE_CLK_B 0x803c
+#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+
+#define _DPIO_IREF_CTL_A 0x8040
+#define _DPIO_IREF_CTL_B 0x8060
+#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
+
+#define DPIO_IREF_BCAST 0xc044
+#define _DPIO_IREF_A 0x8044
+#define _DPIO_IREF_B 0x8064
+#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
+
+#define _DPIO_PLL_CML_A 0x804c
+#define _DPIO_PLL_CML_B 0x806c
+#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
+
+#define _DPIO_LPF_COEFF_A 0x8048
+#define _DPIO_LPF_COEFF_B 0x8068
+#define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B)
+
+#define DPIO_CALIBRATION 0x80ac
+
+#define DPIO_FASTCLK_DISABLE 0x8100
+
+/*
+ * Per DDI channel DPIO regs
+ */
+
+#define _DPIO_PCS_TX_0 0x8200
+#define _DPIO_PCS_TX_1 0x8400
+#define DPIO_PCS_TX_LANE2_RESET (1<<16)
+#define DPIO_PCS_TX_LANE1_RESET (1<<7)
+#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
+
+#define _DPIO_PCS_CLK_0 0x8204
+#define _DPIO_PCS_CLK_1 0x8404
+#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22)
+#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
+#define DPIO_PCS_CLK_SOFT_RESET (1<<5)
+#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
+
+#define _DPIO_PCS_CTL_OVR1_A 0x8224
+#define _DPIO_PCS_CTL_OVR1_B 0x8424
+#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
+ _DPIO_PCS_CTL_OVR1_B)
+
+#define _DPIO_PCS_STAGGER0_A 0x822c
+#define _DPIO_PCS_STAGGER0_B 0x842c
+#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
+ _DPIO_PCS_STAGGER0_B)
+
+#define _DPIO_PCS_STAGGER1_A 0x8230
+#define _DPIO_PCS_STAGGER1_B 0x8430
+#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
+ _DPIO_PCS_STAGGER1_B)
+
+#define _DPIO_PCS_CLOCKBUF0_A 0x8238
+#define _DPIO_PCS_CLOCKBUF0_B 0x8438
+#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
+ _DPIO_PCS_CLOCKBUF0_B)
+
+#define _DPIO_PCS_CLOCKBUF8_A 0x825c
+#define _DPIO_PCS_CLOCKBUF8_B 0x845c
+#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
+ _DPIO_PCS_CLOCKBUF8_B)
+
+#define _DPIO_TX_SWING_CTL2_A 0x8288
+#define _DPIO_TX_SWING_CTL2_B 0x8488
+#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
+ _DPIO_TX_SWING_CTL2_B)
+
+#define _DPIO_TX_SWING_CTL3_A 0x828c
+#define _DPIO_TX_SWING_CTL3_B 0x848c
+#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
+ _DPIO_TX_SWING_CTL3_B)
+
+#define _DPIO_TX_SWING_CTL4_A 0x8290
+#define _DPIO_TX_SWING_CTL4_B 0x8490
+#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
+ _DPIO_TX_SWING_CTL4_B)
+
+#define _DPIO_TX_OCALINIT_0 0x8294
+#define _DPIO_TX_OCALINIT_1 0x8494
+#define DPIO_TX_OCALINIT_EN (1UL<<31)
+#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
+ _DPIO_TX_OCALINIT_1)
+
+#define _DPIO_TX_CTL_0 0x82ac
+#define _DPIO_TX_CTL_1 0x84ac
+#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
+
+#define _DPIO_TX_LANE_0 0x82b8
+#define _DPIO_TX_LANE_1 0x84b8
+#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
+
+#define _DPIO_DATA_CHANNEL1 0x8220
+#define _DPIO_DATA_CHANNEL2 0x8420
+#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
+
+#define _DPIO_PORT0_PCS0 0x0220
+#define _DPIO_PORT0_PCS1 0x0420
+#define _DPIO_PORT1_PCS2 0x2620
+#define _DPIO_PORT1_PCS3 0x2820
+#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
+#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
+#define DPIO_DATA_CHANNEL1 0x8220
+#define DPIO_DATA_CHANNEL2 0x8420
+
+/*
+ * Fence registers
+ */
+#define FENCE_REG_830_0 0x2000
+#define FENCE_REG_945_8 0x3000
+#define I830_FENCE_START_MASK 0x07f80000
+#define I830_FENCE_TILING_Y_SHIFT 12
+#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
+#define I830_FENCE_PITCH_SHIFT 4
+#define I830_FENCE_REG_VALID (1<<0)
+#define I915_FENCE_MAX_PITCH_VAL 4
+#define I830_FENCE_MAX_PITCH_VAL 6
+#define I830_FENCE_MAX_SIZE_VAL (1<<8)
+
+#define I915_FENCE_START_MASK 0x0ff00000
+#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
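+/*
+ * Worked example (illustrative): for a 1 MB object,
+ * I915_FENCE_SIZE_BITS(1 << 20) == ((ffs(1) - 1) << 8) == 0; each doubling
+ * of the object size increments the field at bit 8 by one.
+ */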
+
+#define FENCE_REG_965_0 0x03000
+#define I965_FENCE_PITCH_SHIFT 2
+#define I965_FENCE_TILING_Y_SHIFT 1
+#define I965_FENCE_REG_VALID (1<<0)
+#define I965_FENCE_MAX_PITCH_VAL 0x0400
+
+#define FENCE_REG_SANDYBRIDGE_0 0x100000
+#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
+
+/* control register for cpu gtt access */
+#define TILECTL 0x101000
+#define TILECTL_SWZCTL (1 << 0)
+#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
+#define TILECTL_BACKSNOOP_DIS (1 << 3)
+
+/*
+ * Instruction and interrupt control regs
+ */
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+#define I965_PGETBL_CTL2 0x20c4
+#define I965_PGETBL_SIZE_MASK 0x0000000e
+#define PGTBL_ER 0x02024
+#define RENDER_RING_BASE 0x02000
+#define BSD_RING_BASE 0x04000
+#define GEN6_BSD_RING_BASE 0x12000
+#define VEBOX_RING_BASE 0x1a000
+#define BLT_RING_BASE 0x22000
+#define RING_TAIL(base) ((base)+0x30)
+#define RING_HEAD(base) ((base)+0x34)
+#define RING_START(base) ((base)+0x38)
+#define RING_CTL(base) ((base)+0x3c)
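+/* For reference: RING_TAIL(RENDER_RING_BASE) == 0x02030, i.e. the legacy
+ * PRB0_TAIL offset kept in the #if 0 block below.
+ */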
+#define RING_SYNC_0(base) ((base)+0x40)
+#define RING_SYNC_1(base) ((base)+0x44)
+#define RING_SYNC_2(base) ((base)+0x48)
+#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE))
+#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
+#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE))
+#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE))
+#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
+#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
+#define GEN6_NOSYNC 0
+#define RING_MAX_IDLE(base) ((base)+0x54)
+#define RING_HWS_PGA(base) ((base)+0x80)
+#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define ARB_MODE 0x04030
+#define ARB_MODE_SWIZZLE_SNB (1<<4)
+#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define RENDER_HWS_PGA_GEN7 (0x04080)
+#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
+#define DONE_REG 0x40b0
+#define BSD_HWS_PGA_GEN7 (0x04180)
+#define BLT_HWS_PGA_GEN7 (0x04280)
+#define VEBOX_HWS_PGA_GEN7 (0x04380)
+#define RING_ACTHD(base) ((base)+0x74)
+#define RING_NOPID(base) ((base)+0x94)
+#define RING_IMR(base) ((base)+0xa8)
+#define RING_TIMESTAMP(base) ((base)+0x358)
+#define TAIL_ADDR 0x001FFFF8
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
+#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
+#define PRB1_TAIL 0x02040 /* 915+ only */
+#define PRB1_HEAD 0x02044 /* 915+ only */
+#define PRB1_START 0x02048 /* 915+ only */
+#define PRB1_CTL 0x0204c /* 915+ only */
+#endif
+#define IPEIR_I965 0x02064
+#define IPEHR_I965 0x02068
+#define INSTDONE_I965 0x0206c
+#define GEN7_INSTDONE_1 0x0206c
+#define GEN7_SC_INSTDONE 0x07100
+#define GEN7_SAMPLER_INSTDONE 0x0e160
+#define GEN7_ROW_INSTDONE 0x0e164
+#define I915_NUM_INSTDONE_REG 4
+#define RING_IPEIR(base) ((base)+0x64)
+#define RING_IPEHR(base) ((base)+0x68)
+#define RING_INSTDONE(base) ((base)+0x6c)
+#define RING_INSTPS(base) ((base)+0x70)
+#define RING_DMA_FADD(base) ((base)+0x78)
+#define RING_INSTPM(base) ((base)+0xc0)
+#define INSTPS 0x02070 /* 965+ only */
+#define INSTDONE1 0x0207c /* 965+ only */
+#define ACTHD_I965 0x02074
+#define HWS_PGA 0x02080
+#define HWS_ADDRESS_MASK 0xfffff000
+#define HWS_START_ADDRESS_SHIFT 4
+#define PWRCTXA 0x2088 /* 965GM+ only */
+#define PWRCTX_EN (1<<0)
+#define IPEIR 0x02088
+#define IPEHR 0x0208c
+#define INSTDONE 0x02090
+#define HWSTAM 0x02098
+#define DMA_FADD_I8XX 0x020d0
+
+#define ERROR_GEN6 0x040a0
+#define GEN7_ERR_INT 0x44040
+#define ERR_INT_POISON (1UL<<31)
+#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
+#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
+#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
+
+#define FPGA_DBG 0x42300
+#define FPGA_DBG_RM_NOCLAIM (1UL<<31)
+
+#define DERRMR 0x44050
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior. The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+#define _3D_CHICKEN2 0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+
+#define MI_MODE 0x0209c
+#define VS_TIMER_DISPATCH (1 << 6)
+#define MI_FLUSH_ENABLE (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
+
+#define GEN6_GT_MODE 0x20d0
+#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
+
+#define GFX_MODE 0x02520
+#define GFX_MODE_GEN7 0x0229c
+#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
+#define GFX_RUN_LIST_ENABLE (1<<15)
+#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
+#define GFX_SURFACE_FAULT_ENABLE (1<<12)
+#define GFX_REPLAY_MODE (1<<11)
+#define GFX_PSMI_GRANULARITY (1<<10)
+#define GFX_PPGTT_ENABLE (1<<9)
+
+#define VLV_DISPLAY_BASE 0x180000
+
+#define SCPD0 0x0209c /* 915+ only */
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
+#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
+#define GCFG_DIS (1<<8)
+#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
+#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
+#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define EIR 0x020b0
+#define EMR 0x020b4
+#define ESR 0x020b8
+#define GM45_ERROR_PAGE_TABLE (1<<5)
+#define GM45_ERROR_MEM_PRIV (1<<4)
+#define I915_ERROR_PAGE_TABLE (1<<4)
+#define GM45_ERROR_CP_PRIV (1<<3)
+#define I915_ERROR_MEMORY_REFRESH (1<<1)
+#define I915_ERROR_INSTRUCTION (1<<0)
+#define INSTPM 0x020c0
+#define INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
+#define ACTHD 0x020c8
+#define FW_BLC 0x020d8
+#define FW_BLC2 0x020dc
+#define FW_BLC_SELF 0x020e0 /* 915+ only */
+#define FW_BLC_SELF_EN_MASK (1UL<<31)
+#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
+#define FW_BLC_SELF_EN (1<<15) /* 945 only */
+#define MM_BURST_LENGTH 0x00700000
+#define MM_FIFO_WATERMARK 0x0001F000
+#define LM_BURST_LENGTH 0x00000700
+#define LM_FIFO_WATERMARK 0x0000001F
+#define MI_ARB_STATE 0x020e4 /* 915+ only */
+
+/* Make render/texture TLB fetches lower priority than associated data
+ * fetches. This is not turned on by default.
+ */
+#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
+#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
+#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
+#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
+#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2 TLB fetches.
+ */
+#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
+
+/* Enables non-sequential data reads through the arbiter
+ */
+#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define MI_ARB_TIME_SLICE_MASK (7 << 5)
+#define MI_ARB_TIME_SLICE_1 (0 << 5)
+#define MI_ARB_TIME_SLICE_2 (1 << 5)
+#define MI_ARB_TIME_SLICE_4 (2 << 5)
+#define MI_ARB_TIME_SLICE_6 (3 << 5)
+#define MI_ARB_TIME_SLICE_8 (4 << 5)
+#define MI_ARB_TIME_SLICE_10 (5 << 5)
+#define MI_ARB_TIME_SLICE_14 (6 << 5)
+#define MI_ARB_TIME_SLICE_16 (7 << 5)
+
+/* Low priority grace period page size */
+#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
+#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
+
+/* Disable display A/B trickle feed */
+#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
+
+/* Set display plane priority */
+#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
+#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
+
+#define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
+#define CM0_IZ_OPT_DISABLE (1<<6)
+#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR 0x02140 /* 8 bytes */
+#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL_EN (1<<0)
+#define ECOSKPD 0x021d0
+#define ECO_GATING_CX_ONLY (1<<3)
+#define ECO_FLIP_DONE (1<<0)
+
+#define CACHE_MODE_1 0x7004 /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+
+#define GEN6_BLITTER_ECOSKPD 0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT 16
+#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
+
+#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
+#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
+#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
+#define GEN6_BSD_GO_INDICATOR (1 << 4)
+
+/* On modern GEN architectures interrupt control consists of two sets
+ * of registers. The first set pertains to the ring generating the
+ * interrupt. The second control is for the functional block generating the
+ * interrupt. These are PM, GT, DE, etc.
+ *
+ * Luckily *knocks on wood* all the ring interrupt bits match up with the
+ * GT interrupt bits, so we don't need to duplicate the defines.
+ *
+ * These defines should cover us well from SNB->HSW; with minor exceptions
+ * they can also work on ILK.
+ */
+#define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
+#define GT_BLT_CS_ERROR_INTERRUPT (1 << 25)
+#define GT_BLT_USER_INTERRUPT (1 << 22)
+#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
+#define GT_BSD_USER_INTERRUPT (1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
+#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
+#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
+#define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2)
+#define GT_RENDER_DEBUG_INTERRUPT (1 << 1)
+#define GT_RENDER_USER_INTERRUPT (1 << 0)
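+/*
+ * Example of the shared layout noted above (illustrative):
+ * GT_RENDER_USER_INTERRUPT is valid both as a render ring IMR bit and as
+ * the matching bit in the GT-level interrupt registers, so the same mask
+ * value can be programmed at both levels.
+ */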
+
+#define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */
+#define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */
+
+/* These are all the "old" interrupts */
+#define ILK_BSD_USER_INTERRUPT (1<<5)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
+#define I915_HWB_OOM_INTERRUPT (1<<13)
+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_USER_INTERRUPT (1<<1)
+#define I915_ASLE_INTERRUPT (1<<0)
+#define I915_BSD_USER_INTERRUPT (1 << 25)
+
+#define GEN6_BSD_RNCID 0x12198
+
+#define GEN7_FF_THREAD_MODE 0x20a0
+#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
+#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
+#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0<<12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+
+/*
+ * Framebuffer compression (915+ only)
+ */
+
+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
+#define FBC_CONTROL 0x03208
+#define FBC_CTL_EN (1UL<<31) /* OSOL_i915 */
+#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_INTERVAL_SHIFT (16)
+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_CTL_C3_IDLE (1<<13)
+#define FBC_CTL_STRIDE_SHIFT (5)
+#define FBC_CTL_FENCENO (1<<0)
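+/*
+ * Sketch of an 8xx-style enable value (illustrative; interval, stride and
+ * the fence number are hypothetical inputs):
+ * fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC |
+ * (interval << FBC_CTL_INTERVAL_SHIFT) |
+ * (stride << FBC_CTL_STRIDE_SHIFT) | fence_no;
+ * written to FBC_CONTROL.
+ */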
+#define FBC_COMMAND 0x0320c
+#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_STATUS 0x03210
+#define FBC_STAT_COMPRESSING (1UL<<31) /* OSOL_i915 */
+#define FBC_STAT_COMPRESSED (1<<30)
+#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_CONTROL2 0x03214
+#define FBC_CTL_FENCE_DBL (0<<4)
+#define FBC_CTL_IDLE_IMM (0<<2)
+#define FBC_CTL_IDLE_FULL (1<<2)
+#define FBC_CTL_IDLE_LINE (2<<2)
+#define FBC_CTL_IDLE_DEBUG (3<<2)
+#define FBC_CTL_CPU_FENCE (1<<1)
+#define FBC_CTL_PLANEA (0<<0)
+#define FBC_CTL_PLANEB (1<<0)
+#define FBC_FENCE_OFF 0x0321b
+#define FBC_TAG 0x03300
+
+#define FBC_LL_SIZE (1536)
+
+/* Framebuffer compression for GM45+ */
+#define DPFC_CB_BASE 0x3200
+#define DPFC_CONTROL 0x3208
+#define DPFC_CTL_EN (1UL<<31) /* OSOL_i915 */
+#define DPFC_CTL_PLANEA (0<<30)
+#define DPFC_CTL_PLANEB (1<<30)
+#define IVB_DPFC_CTL_PLANE_SHIFT (29)
+#define DPFC_CTL_FENCE_EN (1<<29)
+#define IVB_DPFC_CTL_FENCE_EN (1<<28)
+#define DPFC_CTL_PERSISTENT_MODE (1<<25)
+#define DPFC_SR_EN (1<<10)
+#define DPFC_CTL_LIMIT_1X (0<<6)
+#define DPFC_CTL_LIMIT_2X (1<<6)
+#define DPFC_CTL_LIMIT_4X (2<<6)
+#define DPFC_RECOMP_CTL 0x320c
+#define DPFC_RECOMP_STALL_EN (1<<27)
+#define DPFC_RECOMP_STALL_WM_SHIFT (16)
+#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
+#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
+#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define DPFC_STATUS 0x3210
+#define DPFC_INVAL_SEG_SHIFT (16)
+#define DPFC_INVAL_SEG_MASK (0x07ff0000)
+#define DPFC_COMP_SEG_SHIFT (0)
+#define DPFC_COMP_SEG_MASK (0x000003ff)
+#define DPFC_STATUS2 0x3214
+#define DPFC_FENCE_YOFF 0x3218
+#define DPFC_CHICKEN 0x3224
+#define DPFC_HT_MODIFY (1UL<<31) /* OSOL_i915 */
+
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE 0x43200
+#define ILK_DPFC_CONTROL 0x43208
+/* Bits 28:8 are reserved */
+#define DPFC_RESERVED (0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL 0x4320c
+#define ILK_DPFC_STATUS 0x43210
+#define ILK_DPFC_FENCE_YOFF 0x43218
+#define ILK_DPFC_CHICKEN 0x43224
+#define ILK_FBC_RT_BASE 0x2128
+#define ILK_FBC_RT_VALID (1<<0)
+
+#define ILK_DISPLAY_CHICKEN1 0x42000
+#define ILK_FBCQ_DIS (1<<22)
+#define ILK_PABSTRETCH_DIS (1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA 0x100100
+#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define DPFC_CPU_FENCE_OFFSET 0x100104
+
+/* Framebuffer compression for Ivybridge */
+#define IVB_FBC_RT_BASE 0x7020
+
+#define IPS_CTL 0x43408
+#define IPS_ENABLE (1UL << 31)
+
+#define MSG_FBC_REND_STATE 0x50380
+#define FBC_REND_NUKE (1<<2)
+#define FBC_REND_CACHE_CLEAN (1<<1)
+
+#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
+#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
+#define HSW_BYPASS_FBC_QUEUE (1<<22)
+#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, \
+ _HSW_PIPE_SLICE_CHICKEN_1_A, \
+ _HSW_PIPE_SLICE_CHICKEN_1_B)
+
+#define HSW_CLKGATE_DISABLE_PART_1 0x46500
+#define HSW_DPFC_GATING_DISABLE (1<<23)
+
+/*
+ * GPIO regs
+ */
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+# define GPIO_CLOCK_DIR_MASK (1 << 0)
+# define GPIO_CLOCK_DIR_IN (0 << 1)
+# define GPIO_CLOCK_DIR_OUT (1 << 1)
+# define GPIO_CLOCK_VAL_MASK (1 << 2)
+# define GPIO_CLOCK_VAL_OUT (1 << 3)
+# define GPIO_CLOCK_VAL_IN (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+# define GPIO_DATA_DIR_MASK (1 << 8)
+# define GPIO_DATA_DIR_IN (0 << 9)
+# define GPIO_DATA_DIR_OUT (1 << 9)
+# define GPIO_DATA_VAL_MASK (1 << 10)
+# define GPIO_DATA_VAL_OUT (1 << 11)
+# define GPIO_DATA_VAL_IN (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define GMBUS0 0x5100
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+#define GMBUS_PORT_DPD 6 /* HDMID */
+#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
+#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1UL<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
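+/*
+ * Sketch of a GMBUS1 read command (illustrative; slave_addr and len are
+ * hypothetical inputs):
+ * GMBUS_SW_RDY | GMBUS_CYCLE_WAIT | (len << GMBUS_BYTE_COUNT_SHIFT) |
+ * (slave_addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ,
+ * then poll GMBUS2 for GMBUS_HW_RDY (or GMBUS_SATOER on a slave NAK).
+ */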
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1UL<<31)
+
+/*
+ * Clock control & power management
+ */
+
+#define VGA0 0x6000
+#define VGA1 0x6004
+#define VGA_PD 0x6010
+#define VGA0_PD_P2_DIV_4 (1 << 7)
+#define VGA0_PD_P1_DIV_2 (1 << 5)
+#define VGA0_PD_P1_SHIFT 0
+#define VGA0_PD_P1_MASK (0x1f << 0)
+#define VGA1_PD_P2_DIV_4 (1 << 15)
+#define VGA1_PD_P1_DIV_2 (1 << 13)
+#define VGA1_PD_P1_SHIFT 8
+#define VGA1_PD_P1_MASK (0x1f << 8)
+#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
+#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
+#define DPLL_VCO_ENABLE (1UL << 31) /* OSOL_i915 */
+#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1<<15)
+#define DPLL_INTEGRATED_CRI_CLK_VLV (1<<14)
+#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
+#define DPLL_PORTC_READY_MASK (0xf << 4)
+#define DPLL_PORTB_READY_MASK (0xf)
+
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
+/* i830, required in DVO non-gang */
+#define PLL_P2_DIVIDE_BY_4 (1 << 23)
+#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
+# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
+#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This best be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+
+#define _FPA0 0x06040
+#define _FPA1 0x06044
+#define _FPB0 0x06048
+#define _FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
+#define FP_M2_DIV_SHIFT 0
+#define DPLL_TEST 0x606c
+#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+#define DPLLB_TEST_N_BYPASS (1 << 19)
+#define DPLLB_TEST_M_BYPASS (1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+#define DPLLA_TEST_N_BYPASS (1 << 3)
+#define DPLLA_TEST_M_BYPASS (1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+#define D_STATE 0x6104
+#define DSTATE_GFX_RESET_I830 (1<<6)
+#define DSTATE_PLL_D3_OFF (1<<3)
+#define DSTATE_GFX_CLOCK_GATING (1<<1)
+#define DSTATE_DOT_CLOCK_GATING (1<<0)
+#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200)
+# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
+# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
+# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
+# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */
+# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
+# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
+# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
+# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
+# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */
+# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */
+# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */
+# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */
+# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */
+# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */
+# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */
+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */
+# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */
+# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
+# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10)
+# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8)
+# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */
+# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */
+# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
+# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
+/**
+ * This bit must be set on the 830 to prevent hangs when turning off the
+ * overlay scaler.
+ */
+# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
+# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2)
+# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1)
+# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
+# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
+
+#define RENCLK_GATE_D1 0x6204
+# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
+# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
+# define PC_FE_CLOCK_GATE_DISABLE (1 << 11)
+# define PC_BE_CLOCK_GATE_DISABLE (1 << 10)
+# define WINDOWER_CLOCK_GATE_DISABLE (1 << 9)
+# define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8)
+# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
+# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
+# define MAG_CLOCK_GATE_DISABLE (1 << 5)
+/** This bit must be unset on 855,865 */
+# define MECI_CLOCK_GATE_DISABLE (1 << 4)
+# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
+# define MEC_CLOCK_GATE_DISABLE (1 << 2)
+# define MECO_CLOCK_GATE_DISABLE (1 << 1)
+/** This bit must be set on 855,865. */
+# define SV_CLOCK_GATE_DISABLE (1 << 0)
+# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
+# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
+# define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14)
+# define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13)
+# define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12)
+# define I915_WM_CLOCK_GATE_DISABLE (1 << 11)
+# define I915_IZ_CLOCK_GATE_DISABLE (1 << 10)
+# define I915_PI_CLOCK_GATE_DISABLE (1 << 9)
+# define I915_DI_CLOCK_GATE_DISABLE (1 << 8)
+# define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7)
+# define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6)
+# define I915_SC_CLOCK_GATE_DISABLE (1 << 5)
+# define I915_FL_CLOCK_GATE_DISABLE (1 << 4)
+# define I915_DM_CLOCK_GATE_DISABLE (1 << 3)
+# define I915_PS_CLOCK_GATE_DISABLE (1 << 2)
+# define I915_CC_CLOCK_GATE_DISABLE (1 << 1)
+# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
+
+# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
+/** This bit must always be set on 965G/965GM */
+# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
+# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
+# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
+# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
+# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
+# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
+/** This bit must always be set on 965G */
+# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
+# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
+# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
+# define I965_IF_CLOCK_GATE_DISABLE (1 << 20)
+# define I965_TC_CLOCK_GATE_DISABLE (1 << 19)
+# define I965_SO_CLOCK_GATE_DISABLE (1 << 17)
+# define I965_FBC_CLOCK_GATE_DISABLE (1 << 16)
+# define I965_MARI_CLOCK_GATE_DISABLE (1 << 15)
+# define I965_MASF_CLOCK_GATE_DISABLE (1 << 14)
+# define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13)
+# define I965_EM_CLOCK_GATE_DISABLE (1 << 12)
+# define I965_UC_CLOCK_GATE_DISABLE (1 << 11)
+# define I965_SI_CLOCK_GATE_DISABLE (1 << 6)
+# define I965_MT_CLOCK_GATE_DISABLE (1 << 5)
+# define I965_PL_CLOCK_GATE_DISABLE (1 << 4)
+# define I965_DG_CLOCK_GATE_DISABLE (1 << 3)
+# define I965_QC_CLOCK_GATE_DISABLE (1 << 2)
+# define I965_FT_CLOCK_GATE_DISABLE (1 << 1)
+# define I965_DM_CLOCK_GATE_DISABLE (1 << 0)
+
+#define RENCLK_GATE_D2 0x6208
+#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
+#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
+#define RAMCLK_GATE_D 0x6210 /* CRL only */
+#define DEUC 0x6214 /* CRL only */
+
+#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
+#define FW_CSPWRDWNEN (1<<15)
+
+#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
+
+/*
+ * Palette regs
+ */
+
+#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
+#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
+#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
+
+/* MCH MMIO space */
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
+ * every way. It is not accessible from the CP register read instructions.
+ */
+#define MCHBAR_MIRROR_BASE 0x10000
+
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
+/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
+#define DCLK 0x5e04
+
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC 0x10200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
+
+/** Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL 0x101a8
+#define CSHRDDR3CTL_DDR3 (1 << 2)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3 0x10206
+#define C1DRB3 0x10606
+
+/** snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define MAD_DIMM_ECC_MASK (0x3 << 24)
+#define MAD_DIMM_ECC_OFF (0x0 << 24)
+#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
+#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
+#define MAD_DIMM_ECC_ON (0x3 << 24)
+#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
+#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
+#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
+#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
+#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
+#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
+#define MAD_DIMM_A_SELECT (0x1 << 16)
+/* DIMM sizes are in multiples of 256mb. */
+#define MAD_DIMM_B_SIZE_SHIFT 8
+#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define MAD_DIMM_A_SIZE_SHIFT 0
+#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
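+
+/*
+ * Illustrative sketch (example only, not from the original header):
+ * decoding the per-channel DIMM sizes from a MAD_DIMM_Cx value.  Per
+ * the comment above, the size fields count in multiples of 256MB.
+ */
+#if 0	/* example only */
+static inline u32 mad_dimm_a_size_mb(u32 mad)
+{
+	return ((mad & MAD_DIMM_A_SIZE_MASK) >> MAD_DIMM_A_SIZE_SHIFT) * 256;
+}
+
+static inline u32 mad_dimm_b_size_mb(u32 mad)
+{
+	return ((mad & MAD_DIMM_B_SIZE_MASK) >> MAD_DIMM_B_SIZE_SHIFT) * 256;
+}
+#endif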
+
+/** snb MCH registers for priority tuning */
+#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define MCH_SSKPD_WM0_MASK 0x3f
+#define MCH_SSKPD_WM0_VAL 0xc
+
+/* Clocking configuration register */
+#define CLKCFG 0x10c00
+#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
+#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
+#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
+#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
+/* Note: the two values below are guesses */
+#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_MASK (7 << 0)
+#define CLKCFG_MEM_533 (1 << 4)
+#define CLKCFG_MEM_667 (2 << 4)
+#define CLKCFG_MEM_800 (3 << 4)
+#define CLKCFG_MEM_MASK (7 << 4)
+
+#define TSC1 0x11001
+#define TSE (1<<0)
+#define TR1 0x11006
+#define TSFS 0x11020
+#define TSFS_SLOPE_MASK 0x0000ff00
+#define TSFS_SLOPE_SHIFT 8
+#define TSFS_INTR_MASK 0x000000ff
+
+#define CRSTANDVID 0x11100
+#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE 0x11110
+#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 0x11114
+#define VIDFREQ3 0x11118
+#define VIDFREQ4 0x1111c
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE_ILK 0x11300
+#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL 0x11170 /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS		(1<<12) /* write 1 triggers command; clears
+					   when the command completes */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_TGT_VID_MASK 0x007f
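+
+/*
+ * Illustrative sketch (example only): the MEMSWCTL command handshake
+ * per the MEMCTL_CMD_STS comment above -- compose a command, write it
+ * with the status bit set to trigger it, then poll until the hardware
+ * clears the bit.  The 16-bit accessors and the timeout policy are
+ * assumptions for illustration, not taken from the driver.
+ */
+#if 0	/* example only */
+static inline int memswctl_send_cmd(u16 cmd)
+{
+	u16 swctl = (cmd << MEMCTL_CMD_SHIFT) & MEMCTL_CMD_MASK;
+	int limit = 100;
+
+	I915_WRITE16(MEMSWCTL, swctl | MEMCTL_CMD_STS);
+	while ((I915_READ16(MEMSWCTL) & MEMCTL_CMD_STS) && --limit)
+		udelay(1);	/* wait for hardware to clear the bit */
+	return limit ? 0 : -1;
+}
+#endif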
+#define MEMIHYST 0x1117c
+#define MEMINTREN 0x11180 /* 16 bits */
+#define MEMINT_RSEXIT_EN (1<<8)
+#define MEMINT_CX_SUPR_EN (1<<7)
+#define MEMINT_CONT_BUSY_EN (1<<6)
+#define MEMINT_AVG_BUSY_EN (1<<5)
+#define MEMINT_EVAL_CHG_EN (1<<4)
+#define MEMINT_MON_IDLE_EN (1<<3)
+#define MEMINT_UP_EVAL_EN (1<<2)
+#define MEMINT_DOWN_EVAL_EN (1<<1)
+#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINTRSTR 0x11182 /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS 0x11184
+#define MEMINT_RSEXIT (1<<7)
+#define MEMINT_CONT_BUSY (1<<6)
+#define MEMINT_AVG_BUSY (1<<5)
+#define MEMINT_EVAL_CHG (1<<4)
+#define MEMINT_MON_IDLE (1<<3)
+#define MEMINT_UP_EVAL (1<<2)
+#define MEMINT_DOWN_EVAL (1<<1)
+#define MEMINT_SW_CMD (1<<0)
+#define MEMMODECTL 0x11190
+#define MEMMODE_BOOST_EN (1UL<<31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1<<15)
+#define MEMMODE_SWMODE_EN (1<<14)
+#define MEMMODE_RCLK_GATE (1<<13)
+#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG 0x1119c
+#define MEMSWCTL2 0x1119e /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1<<12)
+#define SFCAVM (1<<11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG 0x111a0
+#define RCBMINAVG 0x111a0
+#define RCUPEI 0x111b0
+#define RCDNEI 0x111b4
+#define RSTDBYCTL 0x111b8
+#define RS1EN (1UL<<31)
+#define RS2EN (1UL<<30)
+#define RS3EN (1<<29)
+#define   D3RS3EN		(1<<28) /* Display D3 implies RS3 */
+#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */
+#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */
+#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */
+#define GFXTGHYST (1<<24) /* Hysteresis to allow trunk gating */
+#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */
+#define RSX_STATUS_MASK (7<<20)
+#define RSX_STATUS_ON (0<<20)
+#define RSX_STATUS_RC1 (1<<20)
+#define RSX_STATUS_RC1E (2<<20)
+#define RSX_STATUS_RS1 (3<<20)
+#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */
+#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */
+#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */
+#define RSX_STATUS_RSVD2 (7<<20)
+#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */
+#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
+#define JRSC (1<<17) /* rsx coupled to cpu c-state */
+#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */
+#define RS1CONTSAV_MASK (3<<14)
+#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */
+#define RS1CONTSAV_RSVD (1<<14)
+#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */
+#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */
+#define NORMSLEXLAT_MASK (3<<12)
+#define SLOW_RS123 (0<<12)
+#define SLOW_RS23 (1<<12)
+#define SLOW_RS3 (2<<12)
+#define NORMAL_RS123 (3<<12)
+#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */
+#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */
+#define RS_CSTATE_MASK (3<<4)
+#define RS_CSTATE_C367_RS1 (0<<4)
+#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define RS_CSTATE_RSVD (2<<4)
+#define RS_CSTATE_C367_RS2 (3<<4)
+#define REDSAVES (1<<3) /* no context save if was idle during rs0 */
+#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */
+#define VIDCTL 0x111c0
+#define VIDSTS 0x111c8
+#define VIDSTART 0x111cc /* 8 bits */
+#define MEMSTAT_ILK 0x111f8
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define RCPREVBSYTUPAVG 0x113b8
+#define RCPREVBSYTDNAVG 0x113bc
+#define PMMISC 0x11214
+#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */
+#define SDEW 0x1124c
+#define CSIEW0 0x11250
+#define CSIEW1 0x11254
+#define CSIEW2 0x11258
+#define PEW 0x1125c
+#define DEW 0x11270
+#define MCHAFE 0x112c0
+#define CSIEC 0x112e0
+#define DMIEC 0x112e4
+#define DDREC 0x112e8
+#define PEG0EC 0x112ec
+#define PEG1EC 0x112f0
+#define GFXEC 0x112f4
+#define RPPREVBSYTUPAVG 0x113b8
+#define RPPREVBSYTDNAVG 0x113bc
+#define ECR 0x11600
+#define ECR_GPFE (1UL<<31)
+#define ECR_IMONE (1<<30)
+#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
+#define OGW0 0x11608
+#define OGW1 0x1160c
+#define EG0 0x11610
+#define EG1 0x11614
+#define EG2 0x11618
+#define EG3 0x1161c
+#define EG4 0x11620
+#define EG5 0x11624
+#define EG6 0x11628
+#define EG7 0x1162c
+#define PXW 0x11664
+#define PXWL 0x11680
+#define LCFUSE02 0x116c0
+#define LCFUSE_HIV_MASK 0x000000ff
+#define CSIPLL0 0x12c10
+#define DDRMPLL1 0X12c20
+#define PEG_BAND_GAP_DATA 0x14d68
+
+#define GEN6_GT_THREAD_STATUS_REG 0x13805c
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
+
+#define GEN6_GT_PERF_STATUS 0x145948
+#define GEN6_RP_STATE_LIMITS 0x145994
+#define GEN6_RP_STATE_CAP 0x145998
+
+/*
+ * Logical Context regs
+ */
+#define CCID 0x2180
+#define CCID_EN (1<<0)
+#define CXT_SIZE 0x21a0
+#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
+ GEN6_CXT_RING_SIZE(cxt_reg) + \
+ GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+ GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE 0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
+ GEN7_CXT_RING_SIZE(ctx_reg) + \
+ GEN7_CXT_RENDER_SIZE(ctx_reg) + \
+ GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+ GEN7_CXT_GT1_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+/* Haswell does have the CXT_SIZE register, but it does not appear to be
+ * valid; the docs instead describe, in dwords, what is in the context
+ * object. The full size is 70720 bytes; however, the power context and
+ * execlist context will never be saved (the power context is stored
+ * elsewhere, and execlists don't work on HSW), so the final size is
+ * 66944 bytes, which rounds up to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
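+
+/*
+ * Illustrative sketch (example only): unpacking the gen7 context size.
+ * The GEN7_CXT_xxx accessors above extract dword counts from the packed
+ * register; rounding the byte total up to pages is the usual last step.
+ */
+#if 0	/* example only */
+static inline u32 gen7_cxt_size_pages(u32 ctx_reg)
+{
+	u32 dwords = GEN7_CXT_TOTAL_SIZE(ctx_reg);
+
+	/* dwords -> bytes -> whole pages, rounding up */
+	return (dwords * 4 + PAGE_SIZE - 1) / PAGE_SIZE;
+}
+#endif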
+
+/*
+ * Overlay regs
+ */
+
+#define OVADD 0x30000
+#define DOVSTA 0x30008
+#define OC_BUF (0x3<<20)
+#define OGAMC5 0x30010
+#define OGAMC4 0x30014
+#define OGAMC3 0x30018
+#define OGAMC2 0x3001c
+#define OGAMC1 0x30020
+#define OGAMC0 0x30024
+
+/*
+ * Display engine regs
+ */
+
+/* Pipe A timing regs */
+#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
+#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
+#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
+#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
+#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
+#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
+#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
+#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
+#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
+
+/* Pipe B timing regs */
+#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
+#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
+#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
+#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
+#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
+#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
+#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
+#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
+#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
+
+
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
+#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+
+/* VGA port control */
+#define ADPA 0x61100
+#define PCH_ADPA 0xe1100
+#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
+
+#define ADPA_DAC_ENABLE (1UL<<31) /* OSOL_i915 */
+#define ADPA_DAC_DISABLE 0
+#define ADPA_PIPE_SELECT_MASK (1<<30)
+#define ADPA_PIPE_A_SELECT 0
+#define ADPA_PIPE_B_SELECT (1<<30)
+#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
+#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_SETS_HVPOLARITY 0
+#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_LOW 0
+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_LOW 0
+#define ADPA_DPMS_MASK (~(3<<10))
+#define ADPA_DPMS_ON (0<<10)
+#define ADPA_DPMS_SUSPEND (1<<10)
+#define ADPA_DPMS_STANDBY (2<<10)
+#define ADPA_DPMS_OFF (3<<10)
+
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
+#define PORTB_HOTPLUG_INT_EN (1 << 29)
+#define PORTC_HOTPLUG_INT_EN (1 << 28)
+#define PORTD_HOTPLUG_INT_EN (1 << 27)
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define HOTPLUG_INT_EN_MASK (PORTB_HOTPLUG_INT_EN | \
+ PORTC_HOTPLUG_INT_EN | \
+ PORTD_HOTPLUG_INT_EN | \
+ SDVOC_HOTPLUG_INT_EN | \
+ SDVOB_HOTPLUG_INT_EN | \
+ CRT_HOTPLUG_INT_EN)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+
+#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+/* CRT/TV common between gen3+ */
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+/*
+ * Bspec seems to be seriously misleading about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
+#define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_G4X | \
+ SDVOC_HOTPLUG_INT_STATUS_G4X | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915 (CRT_HOTPLUG_INT_STATUS | \
+ SDVOB_HOTPLUG_INT_STATUS_I915 | \
+ SDVOC_HOTPLUG_INT_STATUS_I915 | \
+ PORTB_HOTPLUG_INT_STATUS | \
+ PORTC_HOTPLUG_INT_STATUS | \
+ PORTD_HOTPLUG_INT_STATUS)
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI */
+#define GEN3_SDVOB 0x61140
+#define GEN3_SDVOC 0x61160
+#define GEN4_HDMIB GEN3_SDVOB
+#define GEN4_HDMIC GEN3_SDVOC
+#define PCH_SDVOB 0xe1140
+#define PCH_HDMIB PCH_SDVOB
+#define PCH_HDMIC 0xe1150
+#define PCH_HDMID 0xe1160
+
+/* Gen 3 SDVO bits: */
+#define SDVO_ENABLE (1UL << 31)
+#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
+#define SDVO_PIPE_SEL_MASK (1 << 30)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ * Programmed value is multiplier - 1, up to 5x.
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
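+/*
+ * Illustrative sketch (example only): encoding a pixel multiplier per
+ * the comment above -- the programmed value is (multiplier - 1).
+ */
+#if 0	/* example only */
+#define SDVO_PORT_MULTIPLY(mult) \
+	((((mult) - 1) << SDVO_PORT_MULTIPLY_SHIFT) & SDVO_PORT_MULTIPLY_MASK)
+#endif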
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
+#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
+#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+ SDVO_INTERRUPT_ENABLE)
+#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
+#define SDVO_ENCODING_SDVO (0 << 10)
+#define SDVO_ENCODING_HDMI (2 << 10)
+#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
+#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
+#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
+#define SDVO_AUDIO_ENABLE (1 << 6)
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
+#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
+#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
+
+/* DVO port control */
+#define DVOA 0x61120
+#define DVOB 0x61140
+#define DVOC 0x61160
+#define DVO_ENABLE (1UL << 31) /* OSOL_i915 */
+#define DVO_PIPE_B_SELECT (1 << 30)
+#define DVO_PIPE_STALL_UNUSED (0 << 28)
+#define DVO_PIPE_STALL (1 << 28)
+#define DVO_PIPE_STALL_TV (2 << 28)
+#define DVO_PIPE_STALL_MASK (3 << 28)
+#define DVO_USE_VGA_SYNC (1 << 15)
+#define DVO_DATA_ORDER_I740 (0 << 14)
+#define DVO_DATA_ORDER_FP (1 << 14)
+#define DVO_VSYNC_DISABLE (1 << 11)
+#define DVO_HSYNC_DISABLE (1 << 10)
+#define DVO_VSYNC_TRISTATE (1 << 9)
+#define DVO_HSYNC_TRISTATE (1 << 8)
+#define DVO_BORDER_ENABLE (1 << 7)
+#define DVO_DATA_ORDER_GBRG (1 << 6)
+#define DVO_DATA_ORDER_RGGB (0 << 6)
+#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6)
+#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6)
+#define DVO_VSYNC_ACTIVE_HIGH (1 << 4)
+#define DVO_HSYNC_ACTIVE_HIGH (1 << 3)
+#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
+#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
+#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
+#define DVO_PRESERVE_MASK (0x7<<24)
+#define DVOA_SRCDIM 0x61124
+#define DVOB_SRCDIM 0x61144
+#define DVOC_SRCDIM 0x61164
+#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
+#define DVO_SRCDIM_VERTICAL_SHIFT 0
+
+/* LVDS port control */
+#define LVDS 0x61180
+/*
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN (1UL << 31) /* OSOL_i915 */
+/* Selects pipe B for LVDS data. Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT (1 << 30)
+#define LVDS_PIPE_MASK (1 << 30)
+#define LVDS_PIPE(pipe) ((pipe) << 30)
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER (1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY (1 << 21)
+#define LVDS_HSYNC_POLARITY (1 << 20)
+
+/* Enable border for unscaled (or aspect-scaled) display */
+#define LVDS_BORDER_ENABLE (1 << 15)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK (3 << 6)
+#define LVDS_A3_POWER_DOWN (0 << 6)
+#define LVDS_A3_POWER_UP (3 << 6)
+/*
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK (3 << 4)
+#define LVDS_CLKB_POWER_DOWN (0 << 4)
+#define LVDS_CLKB_POWER_UP (3 << 4)
+/*
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK (3 << 2)
+#define LVDS_B0B3_POWER_DOWN (0 << 2)
+#define LVDS_B0B3_POWER_UP (3 << 2)
+
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_CTL 0x61170
+/* Pre HSW: */
+#define VIDEO_DIP_ENABLE (1UL << 31)
+#define VIDEO_DIP_PORT_B (1 << 29)
+#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_PORT_D (3 << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25)
+#define VIDEO_DIP_ENABLE_AVI (1 << 21)
+#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
+#define VIDEO_DIP_ENABLE_SPD (8 << 21)
+#define VIDEO_DIP_SELECT_AVI (0 << 19)
+#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
+#define VIDEO_DIP_FREQ_ONCE (0 << 16)
+#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
+#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
+
+/* Panel power sequencing */
+#define PP_STATUS 0x61200
+#define PP_ON (1UL << 31) /* OSOL_i915 */
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY (1 << 30)
+#define PP_SEQUENCE_NONE (0 << 28)
+#define PP_SEQUENCE_POWER_UP (1 << 28)
+#define PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define PP_SEQUENCE_MASK (3 << 28)
+#define PP_SEQUENCE_SHIFT 28
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
+#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
+#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
+#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
+#define PP_SEQUENCE_STATE_RESET (0xf << 0)
+#define PP_CONTROL 0x61204
+#define POWER_TARGET_ON (1 << 0)
+#define PP_ON_DELAYS 0x61208
+#define PP_OFF_DELAYS 0x6120c
+#define PP_DIVISOR 0x61210
+
+/* Panel fitting */
+#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
+#define PFIT_ENABLE (1UL << 31)
+#define PFIT_PIPE_MASK (3 << 29)
+#define PFIT_PIPE_SHIFT 29
+#define VERT_INTERP_DISABLE (0 << 10)
+#define VERT_INTERP_BILINEAR (1 << 10)
+#define VERT_INTERP_MASK (3 << 10)
+#define VERT_AUTO_SCALE (1 << 9)
+#define HORIZ_INTERP_DISABLE (0 << 6)
+#define HORIZ_INTERP_BILINEAR (1 << 6)
+#define HORIZ_INTERP_MASK (3 << 6)
+#define HORIZ_AUTO_SCALE (1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define PFIT_FILTER_FUZZY (0 << 24)
+#define PFIT_SCALING_AUTO (0 << 26)
+#define PFIT_SCALING_PROGRAMMED (1 << 26)
+#define PFIT_SCALING_PILLAR (2 << 26)
+#define PFIT_SCALING_LETTER (3 << 26)
+#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
+/* Pre-965 */
+#define PFIT_VERT_SCALE_SHIFT 20
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_SHIFT 4
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* 965+ */
+#define PFIT_VERT_SCALE_SHIFT_965 16
+#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
+#define PFIT_HORIZ_SCALE_SHIFT_965 0
+#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
+
+#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
+
+/* Backlight control */
+#define BLC_PWM_CTL2 0x61250 /* 965+ only */
+#define BLM_PWM_ENABLE (1UL << 31)
+#define BLM_COMBINATION_MODE (1 << 30)
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
+#define BLM_TRANSCODER_B BLM_PIPE_B
+#define BLM_TRANSCODER_C BLM_PIPE_C
+#define BLM_TRANSCODER_EDP (3 << 29)
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL 0x61254
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fffUL << 17) /* OSOL_i915 */
+#define BLM_LEGACY_MODE (1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
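+
+/*
+ * Illustrative sketch (example only): deriving the maximum backlight
+ * level from BLC_PWM_CTL and clamping a requested duty cycle to it,
+ * per the two comments above (the stored frequency field is half the
+ * real cycle count, and the duty cycle must not exceed that count).
+ */
+#if 0	/* example only */
+static inline u32 blc_pwm_max(u32 blc_pwm_ctl)
+{
+	u32 freq = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
+	    BACKLIGHT_MODULATION_FREQ_SHIFT;
+
+	return freq * 2;	/* actual cycle count is the field * 2 */
+}
+
+static inline u32 blc_pwm_set_duty(u32 blc_pwm_ctl, u32 duty)
+{
+	u32 max = blc_pwm_max(blc_pwm_ctl);
+
+	if (duty > max)		/* duty must not exceed the full cycle */
+		duty = max;
+	return (blc_pwm_ctl & ~BACKLIGHT_DUTY_CYCLE_MASK) | duty;
+}
+#endif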
+
+#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260)
+
+/* New registers for PCH-split platforms. Aside from where new bits show up,
+ * the register layout matches gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 0x48250
+#define BLC_PWM_CPU_CTL 0x48254
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 0xc8250
+#define BLM_PCH_PWM_ENABLE (1UL << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 0xc8254
+
+/* TV port control */
+#define TV_CTL 0x68000
+/** Enables the TV encoder */
+# define TV_ENC_ENABLE (1UL << 31) /* OSOL_i915 */
+/** Sources the TV encoder input from pipe B instead of A. */
+# define TV_ENC_PIPEB_SELECT (1 << 30)
+/** Outputs composite video (DAC A only) */
+# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
+/** Outputs SVideo video (DAC B/C) */
+# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
+/** Outputs Component video (DAC A/B/C) */
+# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
+/** Outputs Composite and SVideo (DAC A/B/C) */
+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
+# define TV_TRILEVEL_SYNC (1 << 21)
+/** Enables slow sync generation (945GM only) */
+# define TV_SLOW_SYNC (1 << 20)
+/** Selects 4x oversampling for 480i and 576p */
+# define TV_OVERSAMPLE_4X (0 << 18)
+/** Selects 2x oversampling for 720p and 1080i */
+# define TV_OVERSAMPLE_2X (1 << 18)
+/** Selects no oversampling for 1080p */
+# define TV_OVERSAMPLE_NONE (2 << 18)
+/** Selects 8x oversampling */
+# define TV_OVERSAMPLE_8X (3 << 18)
+/** Selects progressive mode rather than interlaced */
+# define TV_PROGRESSIVE (1 << 17)
+/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
+# define TV_PAL_BURST (1 << 16)
+/** Field for setting delay of Y compared to C */
+# define TV_YC_SKEW_MASK (7 << 12)
+/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
+# define TV_ENC_SDP_FIX (1 << 11)
+/**
+ * Enables a fix for the 915GM only.
+ *
+ * Not sure what it does.
+ */
+# define TV_ENC_C0_FIX (1 << 10)
+/** Bits that must be preserved by software */
+# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
+# define TV_FUSE_STATE_MASK (3 << 4)
+/** Read-only state that reports all features enabled */
+# define TV_FUSE_STATE_ENABLED (0 << 4)
+/** Read-only state that reports that Macrovision is disabled in hardware. */
+# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
+/** Read-only state that reports that TV-out is disabled in hardware. */
+# define TV_FUSE_STATE_DISABLED (2 << 4)
+/** Normal operation */
+# define TV_TEST_MODE_NORMAL (0 << 0)
+/** Encoder test pattern 1 - combo pattern */
+# define TV_TEST_MODE_PATTERN_1 (1 << 0)
+/** Encoder test pattern 2 - full screen vertical 75% color bars */
+# define TV_TEST_MODE_PATTERN_2 (2 << 0)
+/** Encoder test pattern 3 - full screen horizontal 75% color bars */
+# define TV_TEST_MODE_PATTERN_3 (3 << 0)
+/** Encoder test pattern 4 - random noise */
+# define TV_TEST_MODE_PATTERN_4 (4 << 0)
+/** Encoder test pattern 5 - linear color ramps */
+# define TV_TEST_MODE_PATTERN_5 (5 << 0)
+/**
+ * This test mode forces the DACs to 50% of full output.
+ *
+ * This is used for load detection in combination with TVDAC_SENSE_MASK
+ */
+# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
+# define TV_TEST_MODE_MASK (7 << 0)
+
+#define TV_DAC 0x68004
+# define TV_DAC_SAVE 0x00ffff00
+/**
+ * Reports that DAC state change logic has reported change (RO).
+ *
+ * This gets cleared when TV_DAC_STATE_EN is cleared.
+ */
+# define TVDAC_STATE_CHG (1UL << 31) /* OSOL_i915 */
+# define TVDAC_SENSE_MASK (7 << 28)
+/** Reports that DAC A voltage is above the detect threshold */
+# define TVDAC_A_SENSE (1 << 30)
+/** Reports that DAC B voltage is above the detect threshold */
+# define TVDAC_B_SENSE (1 << 29)
+/** Reports that DAC C voltage is above the detect threshold */
+# define TVDAC_C_SENSE (1 << 28)
+/**
+ * Enables DAC state detection logic, for load-based TV detection.
+ *
+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
+ * to off, for load detection to work.
+ */
+# define TVDAC_STATE_CHG_EN (1 << 27)
+/** Sets the DAC A sense value to high */
+# define TVDAC_A_SENSE_CTL (1 << 26)
+/** Sets the DAC B sense value to high */
+# define TVDAC_B_SENSE_CTL (1 << 25)
+/** Sets the DAC C sense value to high */
+# define TVDAC_C_SENSE_CTL (1 << 24)
+/** Overrides the ENC_ENABLE and DAC voltage levels */
+# define DAC_CTL_OVERRIDE (1 << 7)
+/** Sets the slew rate. Must be preserved in software */
+# define ENC_TVDAC_SLEW_FAST (1 << 6)
+# define DAC_A_1_3_V (0 << 4)
+# define DAC_A_1_1_V (1 << 4)
+# define DAC_A_0_7_V (2 << 4)
+# define DAC_A_MASK (3 << 4)
+# define DAC_B_1_3_V (0 << 2)
+# define DAC_B_1_1_V (1 << 2)
+# define DAC_B_0_7_V (2 << 2)
+# define DAC_B_MASK (3 << 2)
+# define DAC_C_1_3_V (0 << 0)
+# define DAC_C_1_1_V (1 << 0)
+# define DAC_C_0_7_V (2 << 0)
+# define DAC_C_MASK (3 << 0)
+
+/**
+ * CSC coefficients are stored in a floating point format with 9 bits of
+ * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
+ * -1 (0x3) being the only legal negative value.
+ */
+#define TV_CSC_Y 0x68010
+# define TV_RY_MASK 0x07ff0000
+# define TV_RY_SHIFT 16
+# define TV_GY_MASK 0x00000fff
+# define TV_GY_SHIFT 0
+
+#define TV_CSC_Y2 0x68014
+# define TV_BY_MASK 0x07ff0000
+# define TV_BY_SHIFT 16
+/**
+ * Y attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AY_MASK 0x000003ff
+# define TV_AY_SHIFT 0
+
+#define TV_CSC_U 0x68018
+# define TV_RU_MASK 0x07ff0000
+# define TV_RU_SHIFT 16
+# define TV_GU_MASK 0x000007ff
+# define TV_GU_SHIFT 0
+
+#define TV_CSC_U2 0x6801c
+# define TV_BU_MASK 0x07ff0000
+# define TV_BU_SHIFT 16
+/**
+ * U attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AU_MASK 0x000003ff
+# define TV_AU_SHIFT 0
+
+#define TV_CSC_V 0x68020
+# define TV_RV_MASK 0x0fff0000
+# define TV_RV_SHIFT 16
+# define TV_GV_MASK 0x000007ff
+# define TV_GV_SHIFT 0
+
+#define TV_CSC_V2 0x68024
+# define TV_BV_MASK 0x07ff0000
+# define TV_BV_SHIFT 16
+/**
+ * V attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AV_MASK 0x000007ff
+# define TV_AV_SHIFT 0
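+
+/*
+ * Illustrative sketch (example only): packing a CSC coefficient in the
+ * format described above.  The layout assumed here -- the exponent in
+ * the bits directly above the 9-bit mantissa -- is an interpretation
+ * of that comment, not taken from hardware documentation.
+ */
+#if 0	/* example only */
+static inline u32 tv_csc_pack(u32 mantissa, u32 exp, int exp_bits)
+{
+	u32 exp_mask = (1 << exp_bits) - 1;	/* 2 or 3 exponent bits */
+
+	return ((exp & exp_mask) << 9) | (mantissa & 0x1ff);
+}
+#endif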
+
+#define TV_CLR_KNOBS 0x68028
+/** 2s-complement brightness adjustment */
+# define TV_BRIGHTNESS_MASK 0xff000000
+# define TV_BRIGHTNESS_SHIFT 24
+/** Contrast adjustment, as a 2.6 unsigned floating point number */
+# define TV_CONTRAST_MASK 0x00ff0000
+# define TV_CONTRAST_SHIFT 16
+/** Saturation adjustment, as a 2.6 unsigned floating point number */
+# define TV_SATURATION_MASK 0x0000ff00
+# define TV_SATURATION_SHIFT 8
+/** Hue adjustment, as an integer phase angle in degrees */
+# define TV_HUE_MASK 0x000000ff
+# define TV_HUE_SHIFT 0
+
+#define TV_CLR_LEVEL 0x6802c
+/** Controls the DAC level for black */
+# define TV_BLACK_LEVEL_MASK 0x01ff0000
+# define TV_BLACK_LEVEL_SHIFT 16
+/** Controls the DAC level for blanking */
+# define TV_BLANK_LEVEL_MASK 0x000001ff
+# define TV_BLANK_LEVEL_SHIFT 0
+
+#define TV_H_CTL_1 0x68030
+/** Number of pixels in the hsync. */
+# define TV_HSYNC_END_MASK 0x1fff0000
+# define TV_HSYNC_END_SHIFT 16
+/** Total number of pixels minus one in the line (display and blanking). */
+# define TV_HTOTAL_MASK 0x00001fff
+# define TV_HTOTAL_SHIFT 0
+
+#define TV_H_CTL_2 0x68034
+/** Enables the colorburst (needed for non-component color) */
+# define TV_BURST_ENA (1UL << 31) /* OSOL_i915 */
+/** Offset of the colorburst from the start of hsync, in pixels minus one. */
+# define TV_HBURST_START_SHIFT 16
+# define TV_HBURST_START_MASK 0x1fff0000
+/** Length of the colorburst */
+# define TV_HBURST_LEN_SHIFT 0
+# define TV_HBURST_LEN_MASK 0x0001fff
+
+#define TV_H_CTL_3 0x68038
+/** End of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_END_SHIFT 16
+# define TV_HBLANK_END_MASK 0x1fff0000
+/** Start of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_START_SHIFT 0
+# define TV_HBLANK_START_MASK 0x0001fff
+
+#define TV_V_CTL_1 0x6803c
+/** XXX */
+# define TV_NBR_END_SHIFT 16
+# define TV_NBR_END_MASK 0x07ff0000
+/** XXX */
+# define TV_VI_END_F1_SHIFT 8
+# define TV_VI_END_F1_MASK 0x00003f00
+/** XXX */
+# define TV_VI_END_F2_SHIFT 0
+# define TV_VI_END_F2_MASK 0x0000003f
+
+#define TV_V_CTL_2 0x68040
+/** Length of vsync, in half lines */
+# define TV_VSYNC_LEN_MASK 0x07ff0000
+# define TV_VSYNC_LEN_SHIFT 16
+/** Offset of the start of vsync in field 1, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F1_MASK 0x00007f00
+# define TV_VSYNC_START_F1_SHIFT 8
+/**
+ * Offset of the start of vsync in field 2, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F2_MASK 0x0000007f
+# define TV_VSYNC_START_F2_SHIFT 0
+
+#define TV_V_CTL_3 0x68044
+/** Enables generation of the equalization signal */
+# define TV_EQUAL_ENA (1UL << 31) /* OSOL_i915 */
+/** Length of the equalization pulse, in half lines */
+# define TV_VEQ_LEN_MASK 0x007f0000
+# define TV_VEQ_LEN_SHIFT 16
+/** Offset of the start of equalization in field 1, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F1_MASK 0x0007f00
+# define TV_VEQ_START_F1_SHIFT 8
+/**
+ * Offset of the start of equalization in field 2, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F2_MASK 0x000007f
+# define TV_VEQ_START_F2_SHIFT 0
+
+#define TV_V_CTL_4 0x68048
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F1_MASK 0x003f0000
+# define TV_VBURST_START_F1_SHIFT 16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F1_MASK 0x000000ff
+# define TV_VBURST_END_F1_SHIFT 0
+
+#define TV_V_CTL_5 0x6804c
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F2_MASK 0x003f0000
+# define TV_VBURST_START_F2_SHIFT 16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F2_MASK 0x000000ff
+# define TV_VBURST_END_F2_SHIFT 0
+
+#define TV_V_CTL_6 0x68050
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F3_MASK 0x003f0000
+# define TV_VBURST_START_F3_SHIFT 16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F3_MASK 0x000000ff
+# define TV_VBURST_END_F3_SHIFT 0
+
+#define TV_V_CTL_7 0x68054
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F4_MASK 0x003f0000
+# define TV_VBURST_START_F4_SHIFT 16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F4_MASK 0x000000ff
+# define TV_VBURST_END_F4_SHIFT 0
+
+#define TV_SC_CTL_1 0x68060
+/** Turns on the first subcarrier phase generation DDA */
+# define TV_SC_DDA1_EN (1UL << 31) /* OSOL_i915 */
+/** Turns on the second subcarrier phase generation DDA */
+# define TV_SC_DDA2_EN			(1 << 30)
+/** Turns on the third subcarrier phase generation DDA */
+# define TV_SC_DDA3_EN			(1 << 29)
+/** Sets the subcarrier DDA to reset frequency every other field */
+# define TV_SC_RESET_EVERY_2 (0 << 24)
+/** Sets the subcarrier DDA to reset frequency every fourth field */
+# define TV_SC_RESET_EVERY_4 (1 << 24)
+/** Sets the subcarrier DDA to reset frequency every eighth field */
+# define TV_SC_RESET_EVERY_8 (2 << 24)
+/** Sets the subcarrier DDA to never reset the frequency */
+# define TV_SC_RESET_NEVER (3 << 24)
+/** Sets the peak amplitude of the colorburst. */
+# define TV_BURST_LEVEL_MASK 0x00ff0000
+# define TV_BURST_LEVEL_SHIFT 16
+/** Sets the increment of the first subcarrier phase generation DDA */
+# define TV_SCDDA1_INC_MASK 0x00000fff
+# define TV_SCDDA1_INC_SHIFT 0
+
+#define TV_SC_CTL_2 0x68064
+/** Sets the rollover for the second subcarrier phase generation DDA */
+# define TV_SCDDA2_SIZE_MASK 0x7fff0000
+# define TV_SCDDA2_SIZE_SHIFT 16
+/** Sets the increment of the second subcarrier phase generation DDA */
+# define TV_SCDDA2_INC_MASK 0x00007fff
+# define TV_SCDDA2_INC_SHIFT 0
+
+#define TV_SC_CTL_3 0x68068
+/** Sets the rollover for the third subcarrier phase generation DDA */
+# define TV_SCDDA3_SIZE_MASK 0x7fff0000
+# define TV_SCDDA3_SIZE_SHIFT 16
+/** Sets the increment of the third subcarrier phase generation DDA */
+# define TV_SCDDA3_INC_MASK 0x00007fff
+# define TV_SCDDA3_INC_SHIFT 0
+
+#define TV_WIN_POS 0x68070
+/** X coordinate of the display from the start of horizontal active */
+# define TV_XPOS_MASK 0x1fff0000
+# define TV_XPOS_SHIFT 16
+/** Y coordinate of the display from the start of vertical active (NBR) */
+# define TV_YPOS_MASK 0x00000fff
+# define TV_YPOS_SHIFT 0
+
+#define TV_WIN_SIZE 0x68074
+/** Horizontal size of the display window, measured in pixels */
+# define TV_XSIZE_MASK 0x1fff0000
+# define TV_XSIZE_SHIFT 16
+/**
+ * Vertical size of the display window, measured in pixels.
+ *
+ * Must be even for interlaced modes.
+ */
+# define TV_YSIZE_MASK 0x00000fff
+# define TV_YSIZE_SHIFT 0
+
+#define TV_FILTER_CTL_1 0x68080
+/**
+ * Enables automatic scaling calculation.
+ *
+ * If set, the rest of the registers are ignored, and the calculated values can
+ * be read back from the register.
+ */
+# define TV_AUTO_SCALE (1UL << 31) /* OSOL_i915 */
+/**
+ * Disables the vertical filter.
+ *
+ * This is required on modes more than 1024 pixels wide.
+ */
+# define TV_V_FILTER_BYPASS (1 << 29)
+/** Enables adaptive vertical filtering */
+# define TV_VADAPT (1 << 28)
+# define TV_VADAPT_MODE_MASK (3 << 26)
+/** Selects the least adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_LEAST (0 << 26)
+/** Selects the moderately adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MODERATE (1 << 26)
+/** Selects the most adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MOST (3 << 26)
+/**
+ * Sets the horizontal scaling factor.
+ *
+ * This should be the fractional part of the horizontal scaling factor divided
+ * by the oversampling rate. TV_HSCALE should be less than 1, and set to:
+ *
+ * (src width - 1) / ((oversample * dest width) - 1)
+ */
+# define TV_HSCALE_FRAC_MASK 0x00003fff
+# define TV_HSCALE_FRAC_SHIFT 0
+
+#define TV_FILTER_CTL_2 0x68084
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
+ */
+# define TV_VSCALE_INT_MASK 0x00038000
+# define TV_VSCALE_INT_SHIFT 15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * \sa TV_VSCALE_INT_MASK
+ */
+# define TV_VSCALE_FRAC_MASK 0x00007fff
+# define TV_VSCALE_FRAC_SHIFT 0
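+
+/*
+ * Illustrative sketch (example only): computing the scaling factors
+ * from the two formulas above.  The horizontal factor is stored as a
+ * 14-bit fraction, the vertical one as 3.15 fixed point; rounding is
+ * left out for brevity.
+ */
+#if 0	/* example only */
+static inline u32 tv_hscale_frac(u32 src_w, u32 oversample, u32 dest_w)
+{
+	/* (src width - 1) / ((oversample * dest width) - 1), as 0.14 */
+	return ((src_w - 1) << 14) / (oversample * dest_w - 1);
+}
+
+static inline u32 tv_vscale(u32 src_h, u32 interlace, u32 dest_h)
+{
+	/* (src height - 1) / ((interlace * dest height) - 1), as 3.15 */
+	return ((src_h - 1) << 15) / (interlace * dest_h - 1);
+}
+#endif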
+
+#define TV_FILTER_CTL_3 0x68088
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ */
+# define TV_VSCALE_IP_INT_MASK 0x00038000
+# define TV_VSCALE_IP_INT_SHIFT 15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ *
+ * \sa TV_VSCALE_IP_INT_MASK
+ */
+# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
+# define TV_VSCALE_IP_FRAC_SHIFT 0
+
+#define TV_CC_CONTROL 0x68090
+# define TV_CC_ENABLE (1UL << 31) /* OSOL_i915 */
+/**
+ * Specifies which field to send the CC data in.
+ *
+ * CC data is usually sent in field 0.
+ */
+# define TV_CC_FID_MASK (1 << 27)
+# define TV_CC_FID_SHIFT 27
+/** Sets the horizontal position of the CC data. Usually 135. */
+# define TV_CC_HOFF_MASK 0x03ff0000
+# define TV_CC_HOFF_SHIFT 16
+/** Sets the vertical position of the CC data. Usually 21 */
+# define TV_CC_LINE_MASK 0x0000003f
+# define TV_CC_LINE_SHIFT 0
+
+#define TV_CC_DATA 0x68094
+# define TV_CC_RDY (1UL << 31) /* OSOL_i915 */
+/** Second word of CC data to be transmitted. */
+# define TV_CC_DATA_2_MASK 0x007f0000
+# define TV_CC_DATA_2_SHIFT 16
+/** First word of CC data to be transmitted. */
+# define TV_CC_DATA_1_MASK 0x0000007f
+# define TV_CC_DATA_1_SHIFT 0
+
+#define TV_H_LUMA_0 0x68100
+#define TV_H_LUMA_59 0x681ec
+#define TV_H_CHROMA_0 0x68200
+#define TV_H_CHROMA_59 0x682ec
+#define TV_V_LUMA_0 0x68300
+#define TV_V_LUMA_42 0x683a8
+#define TV_V_CHROMA_0 0x68400
+#define TV_V_CHROMA_42 0x684a8
+
+/* Display Port */
+#define DP_A 0x64000 /* eDP */
+#define DP_B 0x64100
+#define DP_C 0x64200
+#define DP_D 0x64300
+
+#define DP_PORT_EN (1UL << 31) /* OSOL_i915 */
+#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define DP_LINK_TRAIN_PAT_1 (0 << 28)
+#define DP_LINK_TRAIN_PAT_2 (1 << 28)
+#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
+#define DP_LINK_TRAIN_OFF (3 << 28)
+#define DP_LINK_TRAIN_MASK (3 << 28)
+#define DP_LINK_TRAIN_SHIFT 28
+
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT 8
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define DP_VOLTAGE_0_4 (0 << 25)
+#define DP_VOLTAGE_0_6 (1 << 25)
+#define DP_VOLTAGE_0_8 (2 << 25)
+#define DP_VOLTAGE_1_2 (3 << 25)
+#define DP_VOLTAGE_MASK (7 << 25)
+#define DP_VOLTAGE_SHIFT 25
+
+/* Signal pre-emphasis levels; as with voltages, the other end tells us
+ * what it wants.
+ */
+#define DP_PRE_EMPHASIS_0 (0 << 22)
+#define DP_PRE_EMPHASIS_3_5 (1 << 22)
+#define DP_PRE_EMPHASIS_6 (2 << 22)
+#define DP_PRE_EMPHASIS_9_5 (3 << 22)
+#define DP_PRE_EMPHASIS_MASK (7 << 22)
+#define DP_PRE_EMPHASIS_SHIFT 22
+
+/* How many wires to use. I guess 3 was too hard */
+#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
+#define DP_PORT_WIDTH_MASK (7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define DP_ENHANCED_FRAMING (1 << 18)
+
+/* eDP */
+#define DP_PLL_FREQ_270MHZ (0 << 16)
+#define DP_PLL_FREQ_160MHZ (1 << 16)
+#define DP_PLL_FREQ_MASK (3 << 16)
+
+/** locked once port is enabled */
+#define DP_PORT_REVERSAL (1 << 15)
+
+/* eDP */
+#define DP_PLL_ENABLE (1 << 14)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
+
+#define DP_SCRAMBLING_DISABLE (1 << 12)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define DP_COLOR_RANGE_16_235 (1 << 8)
+
+/** Turn on the audio link */
+#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
+
+/** vs and hs sync polarity */
+#define DP_SYNC_VS_HIGH (1 << 4)
+#define DP_SYNC_HS_HIGH (1 << 3)
+
+/** A fantasy */
+#define DP_DETECTED (1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPA_AUX_CH_CTL 0x64010
+#define DPA_AUX_CH_DATA1 0x64014
+#define DPA_AUX_CH_DATA2 0x64018
+#define DPA_AUX_CH_DATA3 0x6401c
+#define DPA_AUX_CH_DATA4 0x64020
+#define DPA_AUX_CH_DATA5 0x64024
+
+#define DPB_AUX_CH_CTL 0x64110
+#define DPB_AUX_CH_DATA1 0x64114
+#define DPB_AUX_CH_DATA2 0x64118
+#define DPB_AUX_CH_DATA3 0x6411c
+#define DPB_AUX_CH_DATA4 0x64120
+#define DPB_AUX_CH_DATA5 0x64124
+
+#define DPC_AUX_CH_CTL 0x64210
+#define DPC_AUX_CH_DATA1 0x64214
+#define DPC_AUX_CH_DATA2 0x64218
+#define DPC_AUX_CH_DATA3 0x6421c
+#define DPC_AUX_CH_DATA4 0x64220
+#define DPC_AUX_CH_DATA5 0x64224
+
+#define DPD_AUX_CH_CTL 0x64310
+#define DPD_AUX_CH_DATA1 0x64314
+#define DPD_AUX_CH_DATA2 0x64318
+#define DPD_AUX_CH_DATA3 0x6431c
+#define DPD_AUX_CH_DATA4 0x64320
+#define DPD_AUX_CH_DATA5 0x64324
+
+#define DP_AUX_CH_CTL_SEND_BUSY (1UL << 31) /* OSOL_i915 */
+#define DP_AUX_CH_CTL_DONE (1 << 30)
+#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
+#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
+#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
+#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
+#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
+#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
+#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
+#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
+#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
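+
+/*
+ * Illustrative sketch (example only): packing outgoing aux bytes into
+ * one of the five fixed 32-bit data registers described above, most
+ * significant byte first.  Looping over DPx_AUX_CH_DATA1..5 with this
+ * helper covers the full 20-byte packet.
+ */
+#if 0	/* example only */
+static inline u32 dp_aux_pack(const u8 *src, int src_bytes)
+{
+	u32 v = 0;
+	int i;
+
+	if (src_bytes > 4)
+		src_bytes = 4;	/* one register holds at most 4 bytes */
+	for (i = 0; i < src_bytes; i++)
+		v |= ((u32)src[i]) << ((3 - i) * 8);
+	return v;
+}
+#endif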
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
+ *
+ * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+#define _PIPEA_DATA_M_G4X 0x70050
+#define _PIPEB_DATA_M_G4X 0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
+#define TU_SIZE_SHIFT 25
+#define TU_SIZE_MASK (0x3f << 25)
+
+#define DATA_LINK_M_N_MASK (0xffffff)
+#define DATA_LINK_N_MAX (0x800000)
+
+#define _PIPEA_DATA_N_G4X 0x70054
+#define _PIPEB_DATA_N_G4X 0x71054
+#define PIPE_GMCH_DATA_N_MASK (0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
+#define _PIPEA_LINK_M_G4X 0x70060
+#define _PIPEB_LINK_M_G4X 0x71060
+#define PIPEA_DP_LINK_M_MASK (0xffffff)
+
+#define _PIPEA_LINK_N_G4X 0x70064
+#define _PIPEB_LINK_N_G4X 0x71064
+#define PIPEA_DP_LINK_N_MASK (0xffffff)
+
+#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
+#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
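+
+/*
+ * Illustrative sketch (example only): computing a data M/N pair from
+ * the formula above.  Both values must fit the 24-bit register fields,
+ * so the ratio is shifted down whenever N would overflow; the scaling
+ * strategy here is an assumption for illustration.
+ */
+#if 0	/* example only */
+static inline void compute_data_m_n(u32 bits_per_pixel, u32 nlanes,
+    u32 pixel_clock, u32 link_clock, u32 *m_out, u32 *n_out)
+{
+	u64 m = (u64)pixel_clock * bits_per_pixel;	/* numerator */
+	u64 n = (u64)link_clock * nlanes * 8;		/* denominator */
+
+	/* scale both down until N fits within DATA_LINK_N_MAX */
+	while (n > DATA_LINK_N_MAX) {
+		m >>= 1;
+		n >>= 1;
+	}
+	*m_out = (u32)m & DATA_LINK_M_N_MASK;
+	*n_out = (u32)n & DATA_LINK_M_N_MASK;
+}
+#endif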
+
+/* Display & cursor control */
+
+/* Pipe A */
+#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
+#define DSL_LINEMASK_GEN2 0x00000fff
+#define DSL_LINEMASK_GEN3 0x00001fff
+#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
+#define PIPECONF_ENABLE (1UL<<31) /* OSOL_i915 */
+#define PIPECONF_DISABLE 0
+#define PIPECONF_DOUBLE_WIDE (1<<30)
+#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define PIPECONF_SINGLE_WIDE 0
+#define PIPECONF_PIPE_UNLOCKED 0
+#define PIPECONF_PIPE_LOCKED (1<<25)
+#define PIPECONF_PALETTE 0
+#define PIPECONF_GAMMA (1<<24)
+#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_INTERLACE_MASK (7 << 21)
+#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
+/* Note that pre-gen3 does not support interlaced display directly. Panel
+ * fitting must be disabled on pre-ilk for interlaced. */
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+/* Ironlake and later have a completely new set of values for interlaced. PFIT
+ * means panel fitter required, PF means progressive fetch, DBL means power
+ * saving pixel doubling. */
+#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
+#define PIPECONF_INTERLACED_ILK (3 << 21)
+#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
+#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
+#define PIPECONF_INTERLACE_MODE_MASK (7 << 21)
+#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
+#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_BPC_MASK (0x7 << 5)
+#define PIPECONF_8BPC (0<<5)
+#define PIPECONF_10BPC (1<<5)
+#define PIPECONF_6BPC (2<<5)
+#define PIPECONF_12BPC (3<<5)
+#define PIPECONF_DITHER_EN (1<<4)
+#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define PIPECONF_DITHER_TYPE_SP (0<<2)
+#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
+#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
+#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
+#define PIPE_CRC_DONE_ENABLE (1UL<<28)
+#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
+#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
+#define PIPE_DPST_EVENT_STATUS (1UL<<7)
+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
+#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+
+#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
+#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
+#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+
+#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
+#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
+#define PIPEB_HLINE_INT_EN (1<<28)
+#define PIPEB_VBLANK_INT_EN (1<<27)
+#define SPRITED_FLIPDONE_INT_EN (1<<26)
+#define SPRITEC_FLIPDONE_INT_EN (1<<25)
+#define PLANEB_FLIPDONE_INT_EN (1<<24)
+#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
+#define PIPEA_HLINE_INT_EN (1<<20)
+#define PIPEA_VBLANK_INT_EN (1<<19)
+#define SPRITEB_FLIPDONE_INT_EN (1<<18)
+#define SPRITEA_FLIPDONE_INT_EN (1<<17)
+#define PLANEA_FLIPDONE_INT_EN (1<<16)
+
+#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
+#define CURSORB_INVALID_GTT_INT_EN (1<<23)
+#define CURSORA_INVALID_GTT_INT_EN (1<<22)
+#define SPRITED_INVALID_GTT_INT_EN (1<<21)
+#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
+#define PLANEB_INVALID_GTT_INT_EN (1<<19)
+#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
+#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
+#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define DPINVGTT_EN_MASK 0xff0000
+#define CURSORB_INVALID_GTT_STATUS (1<<7)
+#define CURSORA_INVALID_GTT_STATUS (1<<6)
+#define SPRITED_INVALID_GTT_STATUS (1<<5)
+#define SPRITEC_INVALID_GTT_STATUS (1<<4)
+#define PLANEB_INVALID_GTT_STATUS (1<<3)
+#define SPRITEB_INVALID_GTT_STATUS (1<<2)
+#define SPRITEA_INVALID_GTT_STATUS (1<<1)
+#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define DPINVGTT_STATUS_MASK 0xff
+
+#define DSPARB 0x70030
+#define DSPARB_CSTART_MASK (0x7f << 7)
+#define DSPARB_CSTART_SHIFT 7
+#define DSPARB_BSTART_MASK (0x7f)
+#define DSPARB_BSTART_SHIFT 0
+#define DSPARB_BEND_SHIFT 9 /* on 855 */
+#define DSPARB_AEND_SHIFT 0
+
+#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
+#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ffUL<<23)
+#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
+#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEA_MASK (0x7f)
+#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
+#define DSPFW_CURSORA_MASK 0x00003f00
+#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_PLANEC_MASK (0x7f)
+#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
+#define DSPFW_HPLL_SR_EN (1UL<<31) /* OSOL_i915 */
+#define DSPFW_CURSOR_SR_SHIFT 24
+#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_SR_MASK (0x1ff)
+#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070)
+#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c)
+
+/* drain latency register values */
+#define DRAIN_LATENCY_PRECISION_32 32
+#define DRAIN_LATENCY_PRECISION_16 16
+#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
+#define DDL_CURSORA_PRECISION_32 (1UL<<31)
+#define DDL_CURSORA_PRECISION_16 (0<<31)
+#define DDL_CURSORA_SHIFT 24
+#define DDL_PLANEA_PRECISION_32 (1<<7)
+#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
+#define DDL_CURSORB_PRECISION_32 (1UL<<31)
+#define DDL_CURSORB_PRECISION_16 (0<<31)
+#define DDL_CURSORB_SHIFT 24
+#define DDL_PLANEB_PRECISION_32 (1<<7)
+#define DDL_PLANEB_PRECISION_16 (0<<7)
+
+/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE 64
+#define I915_FIFO_LINE_SIZE 64
+#define I830_FIFO_LINE_SIZE 32
+
+#define VALLEYVIEW_FIFO_SIZE 255
+#define G4X_FIFO_SIZE 127
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
+#define I915_FIFO_SIZE 95
+#define I855GM_FIFO_SIZE 127 /* In cachelines */
+#define I830_FIFO_SIZE 95
+
+#define VALLEYVIEW_MAX_WM 0xff
+#define G4X_MAX_WM 0x3f
+#define I915_MAX_WM 0x3f
+
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64-byte units */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
+
+#define VALLEYVIEW_CURSOR_MAX_WM 64
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
+
+/* define the Watermark registers on Ironlake */
+#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_SHIFT 16
+#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_SHIFT 8
+#define WM0_PIPE_CURSOR_MASK (0x1f)
+
+#define WM0_PIPEB_ILK 0x45104
+#define WM0_PIPEC_IVB 0x45200
+#define WM1_LP_ILK 0x45108
+#define WM1_LP_SR_EN (1UL<<31)
+#define WM1_LP_LATENCY_SHIFT 24
+#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_FBC_MASK (0xf<<20)
+#define WM1_LP_FBC_SHIFT 20
+#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_SR_SHIFT 8
+#define WM1_LP_CURSOR_MASK (0x3f)
+#define WM2_LP_ILK 0x4510c
+#define WM2_LP_EN (1UL<<31)
+#define WM3_LP_ILK 0x45110
+#define WM3_LP_EN (1UL<<31)
+#define WM1S_LP_ILK 0x45120
+#define WM2S_LP_IVB 0x45124
+#define WM3S_LP_IVB 0x45128
+#define WM1S_LP_EN (1UL<<31)
+
+#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
+ (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
+ ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
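+/*
+ * Example (illustrative, arbitrary values): HSW_WM_LP_VAL(0x18, 0x3, 0x40, 0x8)
+ * builds an enabled LP watermark with latency 0x18, FBC watermark 3,
+ * primary watermark 0x40 and cursor watermark 8, each within its mask above.
+ */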
+
+/* Memory latency timer register */
+#define MLTR_ILK 0x11222
+#define MLTR_WM1_SHIFT 0
+#define MLTR_WM2_SHIFT 8
+/* memory self-refresh latency is reported in units of 0.5us */
+#define ILK_SRLT_MASK 0x3f
+#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
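+/*
+ * Example (illustrative): ILK_READ_WM1_LATENCY() returning 0x0c means a
+ * WM1 self-refresh latency of 12 * 0.5us = 6us.
+ */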
+
+/* define the FIFO sizes on Ironlake */
+#define ILK_DISPLAY_FIFO 128
+#define ILK_DISPLAY_MAXWM 64
+#define ILK_DISPLAY_DFTWM 8
+#define ILK_CURSOR_FIFO 32
+#define ILK_CURSOR_MAXWM 16
+#define ILK_CURSOR_DFTWM 8
+
+#define ILK_DISPLAY_SR_FIFO 512
+#define ILK_DISPLAY_MAX_SRWM 0x1ff
+#define ILK_DISPLAY_DFT_SRWM 0x3f
+#define ILK_CURSOR_SR_FIFO 64
+#define ILK_CURSOR_MAX_SRWM 0x3f
+#define ILK_CURSOR_DFT_SRWM 8
+
+#define ILK_FIFO_LINE_SIZE 64
+
+/* define the WM info on Sandybridge */
+#define SNB_DISPLAY_FIFO 128
+#define SNB_DISPLAY_MAXWM 0x7f /* bits 22:16 */
+#define SNB_DISPLAY_DFTWM 8
+#define SNB_CURSOR_FIFO 32
+#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
+#define SNB_CURSOR_DFTWM 8
+
+#define SNB_DISPLAY_SR_FIFO 512
+#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM 0x3f
+#define SNB_CURSOR_SR_FIFO 64
+#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM 8
+
+#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE 64
+
+
+/* the register where we get all kinds of latency values */
+#define SSKPD 0x5d10
+#define SSKPD_WM_MASK 0x3f
+#define SSKPD_WM0_SHIFT 0
+#define SSKPD_WM1_SHIFT 8
+#define SSKPD_WM2_SHIFT 16
+#define SSKPD_WM3_SHIFT 24
+
+#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
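+/*
+ * Sketch (illustrative, not part of the imported code): a hypothetical
+ * helper snapshotting the four SSKPD watermark latencies, assuming
+ * I915_READ() picks up the dev_priv in scope as it does elsewhere in
+ * this driver.
+ */
+static inline void
+snb_snapshot_wm_latencies(struct drm_i915_private *dev_priv, uint32_t lat[4])
+{
+	lat[0] = SNB_READ_WM0_LATENCY();	/* all in 0.5us units */
+	lat[1] = SNB_READ_WM1_LATENCY();
+	lat[2] = SNB_READ_WM2_LATENCY();
+	lat[3] = SNB_READ_WM3_LATENCY();
+}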
+
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ * do {
+ * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT);
+ * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ * PIPE_FRAME_LOW_SHIFT);
+ * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ * PIPE_FRAME_HIGH_SHIFT);
+ * } while (high1 != high2);
+ * frame = (high1 << 8) | low1;
+ */
+#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
+/* GM45+ just has to be different */
+#define _PIPEA_FRMCOUNT_GM45 0x70040
+#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
+
+/* Cursor A & B regs */
+#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
+/* Old style CUR*CNTR flags (desktop 8xx) */
+#define CURSOR_ENABLE 0x80000000
+#define CURSOR_GAMMA_ENABLE 0x40000000
+#define CURSOR_STRIDE_MASK 0x30000000
+#define CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define CURSOR_FORMAT_SHIFT 24
+#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
+#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
+/* New style CUR*CNTR flags */
+#define CURSOR_MODE 0x27
+#define CURSOR_MODE_DISABLE 0x00
+#define CURSOR_MODE_64_32B_AX 0x07
+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_PIPE_SELECT (1 << 28)
+#define MCURSOR_PIPE_A 0x00
+#define MCURSOR_PIPE_B (1 << 28)
+#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
+#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
+#define CURSOR_POS_MASK 0x007FF
+#define CURSOR_POS_SIGN 0x8000UL /* OSOL_i915 */
+#define CURSOR_X_SHIFT 0
+#define CURSOR_Y_SHIFT 16
+#define CURSIZE 0x700a0
+#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
+#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
+#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
+
+#define _CURBCNTR_IVB 0x71080
+#define _CURBBASE_IVB 0x71084
+#define _CURBPOS_IVB 0x71088
+
+#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
+
+#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
+#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
+#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
+
+/* Display A control */
+#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
+#define DISPLAY_PLANE_ENABLE (1UL<<31) /* OSOL_i915 */
+#define DISPLAY_PLANE_DISABLE 0
+#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_DISABLE 0
+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_YUV422 (0x0<<26)
+#define DISPPLANE_8BPP (0x2<<26)
+#define DISPPLANE_BGRA555 (0x3<<26)
+#define DISPPLANE_BGRX555 (0x4<<26)
+#define DISPPLANE_BGRX565 (0x5<<26)
+#define DISPPLANE_BGRX888 (0x6<<26)
+#define DISPPLANE_BGRA888 (0x7<<26)
+#define DISPPLANE_RGBX101010 (0x8<<26)
+#define DISPPLANE_RGBA101010 (0x9<<26)
+#define DISPPLANE_BGRX101010 (0xa<<26)
+#define DISPPLANE_RGBX161616 (0xc<<26)
+#define DISPPLANE_RGBX888 (0xe<<26)
+#define DISPPLANE_RGBA888 (0xf<<26)
+#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
+#define DISPPLANE_SEL_PIPE_SHIFT 24
+#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SEL_PIPE_A 0
+#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE 0
+#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE 0
+#define DISPPLANE_STEREO_POLARITY_FIRST 0
+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
+#define DISPPLANE_TILED (1<<10)
+#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
+#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
+#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
+#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
+#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
+#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
+#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
+#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
+
+#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+ (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
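+/*
+ * Example (illustrative): I915_MODIFY_DISPBASE(DSPSURF(plane), addr)
+ * writes the page-aligned address into bits 31:12 while re-writing the
+ * low-order bits the register already held.
+ */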
+
+/* VBIOS flags */
+#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
+#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
+#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
+#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
+#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
+#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
+#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
+#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
+#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
+#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
+#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)
+
+/* Pipe B */
+#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
+#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
+#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
+#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_GM45 0x71040
+#define _PIPEB_FLIPCOUNT_GM45 0x71044
+
+
+/* Display B control */
+#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
+#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
+#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
+#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)
+
+/* Sprite A control */
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE (1UL<<31)
+#define DVS_GAMMA_ENABLE (1<<30)
+#define DVS_PIXFORMAT_MASK (3<<25)
+#define DVS_FORMAT_YUV422 (0<<25)
+#define DVS_FORMAT_RGBX101010 (1<<25)
+#define DVS_FORMAT_RGBX888 (2<<25)
+#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_PIPE_CSC_ENABLE (1<<24)
+#define DVS_SOURCE_KEY (1<<22)
+#define DVS_RGB_ORDER_XBGR (1<<20)
+#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define DVS_YUV_ORDER_YUYV (0<<16)
+#define DVS_YUV_ORDER_UYVY (1<<16)
+#define DVS_YUV_ORDER_YVYU (2<<16)
+#define DVS_YUV_ORDER_VYUY (3<<16)
+#define DVS_DEST_KEY (1<<2)
+#define DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define DVS_TILED (1<<10)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define _DVSASIZE 0x72190
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define _DVSASURFLIVE 0x721ac
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE (1UL<<31)
+#define DVS_FILTER_MASK (3<<29)
+#define DVS_FILTER_MEDIUM (0<<29)
+#define DVS_FILTER_ENHANCING (1<<29)
+#define DVS_FILTER_SOFTENING (2<<29)
+#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC 0x72300
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC 0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE (1UL<<31)
+#define SPRITE_GAMMA_ENABLE (1<<30)
+#define SPRITE_PIXFORMAT_MASK (7<<25)
+#define SPRITE_FORMAT_YUV422 (0<<25)
+#define SPRITE_FORMAT_RGBX101010 (1<<25)
+#define SPRITE_FORMAT_RGBX888 (2<<25)
+#define SPRITE_FORMAT_RGBX161616 (3<<25)
+#define SPRITE_FORMAT_YUV444 (4<<25)
+#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
+#define SPRITE_PIPE_CSC_ENABLE (1<<24)
+#define SPRITE_SOURCE_KEY (1<<22)
+#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
+#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
+#define SPRITE_YUV_ORDER_YUYV (0<<16)
+#define SPRITE_YUV_ORDER_UYVY (1<<16)
+#define SPRITE_YUV_ORDER_YVYU (2<<16)
+#define SPRITE_YUV_ORDER_VYUY (3<<16)
+#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
+#define SPRITE_INT_GAMMA_ENABLE (1<<13)
+#define SPRITE_TILED (1<<10)
+#define SPRITE_DEST_KEY (1<<2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define _SPRA_SIZE 0x70290
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE (1UL<<31)
+#define SPRITE_FILTER_MASK (3<<29)
+#define SPRITE_FILTER_MEDIUM (0<<29)
+#define SPRITE_FILTER_ENHANCING (1<<29)
+#define SPRITE_FILTER_SOFTENING (2<<29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _SPRA_GAMC 0x70400
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+
+#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
+#define SP_ENABLE (1UL<<31)
+#define SP_GEAMMA_ENABLE (1<<30)
+#define SP_PIXFORMAT_MASK (0xf<<26)
+#define SP_FORMAT_YUV422 (0<<26)
+#define SP_FORMAT_BGR565 (5<<26)
+#define SP_FORMAT_BGRX8888 (6<<26)
+#define SP_FORMAT_BGRA8888 (7<<26)
+#define SP_FORMAT_RGBX1010102 (8<<26)
+#define SP_FORMAT_RGBA1010102 (9<<26)
+#define SP_FORMAT_RGBX8888 (0xe<<26)
+#define SP_FORMAT_RGBA8888 (0xf<<26)
+#define SP_SOURCE_KEY (1<<22)
+#define SP_YUV_BYTE_ORDER_MASK (3<<16)
+#define SP_YUV_ORDER_YUYV (0<<16)
+#define SP_YUV_ORDER_UYVY (1<<16)
+#define SP_YUV_ORDER_YVYU (2<<16)
+#define SP_YUV_ORDER_VYUY (3<<16)
+#define SP_TILED (1<<10)
+#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
+#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
+#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
+#define _SPASIZE (VLV_DISPLAY_BASE + 0x72190)
+#define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194)
+#define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198)
+#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c)
+#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
+#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
+#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
+#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
+
+#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
+#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
+#define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288)
+#define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c)
+#define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290)
+#define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294)
+#define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298)
+#define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c)
+#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
+#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
+#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
+#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
+
+#define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
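+/*
+ * Example (illustrative): assuming the usual _PIPE(i, a, b) expansion of
+ * (a) + (i) * ((b) - (a)), SPCNTR(1, 0) selects index 2 and resolves to
+ * VLV_DISPLAY_BASE + 0x72380, i.e. the first sprite on pipe B.
+ */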
+
+/* VBIOS regs */
+#define VGACNTRL 0x71400
+# define VGA_DISP_DISABLE (1UL << 31) /* OSOL_i915 */
+# define VGA_2X_MODE (1 << 30)
+# define VGA_PIPE_B_SELECT (1 << 29)
+
+#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
+
+/* Ironlake */
+
+#define CPU_VGACNTRL 0x41000
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
+#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
+#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
+#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
+#define DIGITAL_PORTA_NO_DETECT (0 << 0)
+#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
+#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL 0x45300
+#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
+#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
+
+#define FDI_PLL_BIOS_0 0x46000
+#define FDI_PLL_FB_CLOCK_MASK 0xff
+#define FDI_PLL_BIOS_1 0x46004
+#define FDI_PLL_BIOS_2 0x46008
+#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
+#define DISPLAY_PORT_PLL_BIOS_1 0x46010
+#define DISPLAY_PORT_PLL_BIOS_2 0x46014
+
+#define PCH_3DCGDIS0 0x46020
+# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
+# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
+
+#define PCH_3DCGDIS1 0x46024
+# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
+#define FDI_PLL_FREQ_CTL 0x46030
+#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
+#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
+#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
+
+
+#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
+#define PIPE_DATA_M1_OFFSET 0
+#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
+#define PIPE_DATA_N1_OFFSET 0
+
+#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
+#define PIPE_DATA_M2_OFFSET 0
+#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
+#define PIPE_DATA_N2_OFFSET 0
+
+#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
+#define PIPE_LINK_M1_OFFSET 0
+#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
+#define PIPE_LINK_N1_OFFSET 0
+
+#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
+#define PIPE_LINK_M2_OFFSET 0
+#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
+#define PIPE_LINK_N2_OFFSET 0
+
+/* PIPEB timing regs are the same, starting at 0x61000 */
+
+#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
+#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
+
+#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
+#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
+
+#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
+#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
+
+#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
+#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
+
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+
+/* CPU panel fitter */
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
+#define PF_ENABLE (1UL<<31) /* OSOL_i915 */
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
+#define PF_FILTER_MASK (3<<23)
+#define PF_FILTER_PROGRAMMED (0<<23)
+#define PF_FILTER_MED_3x3 (1<<23)
+#define PF_FILTER_EDGE_ENHANCE (2<<23)
+#define PF_FILTER_EDGE_SOFTEN (3<<23)
+#define _PFA_WIN_SZ 0x68074
+#define _PFB_WIN_SZ 0x68874
+#define _PFA_WIN_POS 0x68070
+#define _PFB_WIN_POS 0x68870
+#define _PFA_VSCALE 0x68084
+#define _PFB_VSCALE 0x68884
+#define _PFA_HSCALE 0x68090
+#define _PFB_HSCALE 0x68890
+
+#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+
+/* legacy palette */
+#define _LGC_PALETTE_A 0x4a000
+#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
+
+#define _GAMMA_MODE_A 0x4a480
+#define _GAMMA_MODE_B 0x4ac80
+#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
+#define GAMMA_MODE_MODE_MASK (3 << 0)
+#define GAMMA_MODE_MODE_8BIT (0 << 0)
+#define GAMMA_MODE_MODE_10BIT (1 << 0)
+#define GAMMA_MODE_MODE_12BIT (2 << 0)
+#define GAMMA_MODE_MODE_SPLIT (3 << 0)
+
+/* interrupts */
+#define DE_MASTER_IRQ_CONTROL (1UL << 31) /* OSOL_i915 */
+#define DE_SPRITEB_FLIP_DONE (1 << 29)
+#define DE_SPRITEA_FLIP_DONE (1 << 28)
+#define DE_PLANEB_FLIP_DONE (1 << 27)
+#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PCU_EVENT (1 << 25)
+#define DE_GTT_FAULT (1 << 24)
+#define DE_POISON (1 << 23)
+#define DE_PERFORM_COUNTER (1 << 22)
+#define DE_PCH_EVENT (1 << 21)
+#define DE_AUX_CHANNEL_A (1 << 20)
+#define DE_DP_A_HOTPLUG (1 << 19)
+#define DE_GSE (1 << 18)
+#define DE_PIPEB_VBLANK (1 << 15)
+#define DE_PIPEB_EVEN_FIELD (1 << 14)
+#define DE_PIPEB_ODD_FIELD (1 << 13)
+#define DE_PIPEB_LINE_COMPARE (1 << 12)
+#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
+#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPEA_EVEN_FIELD (1 << 6)
+#define DE_PIPEA_ODD_FIELD (1 << 5)
+#define DE_PIPEA_LINE_COMPARE (1 << 4)
+#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+
+/* More Ivybridge lolz */
+#define DE_ERR_INT_IVB (1<<30)
+#define DE_GSE_IVB (1<<29)
+#define DE_PCH_EVENT_IVB (1<<28)
+#define DE_DP_A_HOTPLUG_IVB (1<<27)
+#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
+#define DE_PIPEC_VBLANK_IVB (1<<10)
+#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
+#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
+#define DE_PIPEB_VBLANK_IVB (1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
+#define DE_PIPEA_VBLANK_IVB (1<<0)
+
+#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
+#define MASTER_INTERRUPT_ENABLE (1UL<<31)
+
+#define DEISR 0x44000
+#define DEIMR 0x44004
+#define DEIIR 0x44008
+#define DEIER 0x4400c
+
+#define GTISR 0x44010
+#define GTIMR 0x44014
+#define GTIIR 0x44018
+#define GTIER 0x4401c
+
+#define ILK_DISPLAY_CHICKEN2 0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define ILK_ELPIN_409_SELECT (1 << 25)
+#define ILK_DPARB_GATE (1<<22)
+#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
+#define ILK_INTERNAL_GRAPHICS_DISABLE (1UL<<31)
+#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30)
+#define ILK_DISPLAY_DEBUG_DISABLE (1<<29)
+#define ILK_HDCP_DISABLE (1<<25)
+#define ILK_eDP_A_DISABLE (1<<24)
+#define ILK_DESKTOP (1<<23)
+
+#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
+
+#define IVB_CHICKEN3 0x4200c
+#define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
+#define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
+
+#define CHICKEN_PAR1_1 0x42080
+#define FORCE_ARB_IDLE_PLANES (1 << 14)
+
+#define DISP_ARB_CTL 0x45000
+#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+#define DISP_FBC_WM_DIS (1<<15)
+#define GEN7_MSG_CTL 0x45010
+#define WAIT_FOR_PCH_RESET_ACK (1<<1)
+#define WAIT_FOR_PCH_FLR_ACK (1<<0)
+
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
+# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
+
+#define GEN7_L3CNTLREG1 0xB01C
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_L3AGDIS (1<<19)
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
+#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+
+#define GEN7_L3SQCREG4 0xb034
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+
+/* WaCatErrorRejectionIssue */
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+
+#define HSW_FUSE_STRAP 0x42014
+#define HSW_CDCLK_LIMIT (1 << 24)
+
+/* PCH */
+
+/* south display engine interrupt */
+#define SDE_AUDIO_POWER_D (1 << 27)
+#define SDE_AUDIO_POWER_C (1 << 26)
+#define SDE_AUDIO_POWER_B (1 << 25)
+#define SDE_AUDIO_POWER_SHIFT (25)
+#define SDE_AUDIO_POWER_MASK (7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS (1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB (1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA (1 << 22)
+#define SDE_AUDIO_HDCP_MASK (3 << 22)
+#define SDE_AUDIO_TRANSB (1 << 21)
+#define SDE_AUDIO_TRANSA (1 << 20)
+#define SDE_AUDIO_TRANS_MASK (3 << 20)
+#define SDE_POISON (1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB (1 << 17)
+#define SDE_FDI_RXA (1 << 16)
+#define SDE_FDI_MASK (3 << 16)
+#define SDE_AUXD (1 << 15)
+#define SDE_AUXC (1 << 14)
+#define SDE_AUXB (1 << 13)
+#define SDE_AUX_MASK (7 << 13)
+/* 12 reserved */
+#define SDE_CRT_HOTPLUG (1 << 11)
+#define SDE_PORTD_HOTPLUG (1 << 10)
+#define SDE_PORTC_HOTPLUG (1 << 9)
+#define SDE_PORTB_HOTPLUG (1 << 8)
+#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (SDE_CRT_HOTPLUG | \
+ SDE_SDVOB_HOTPLUG | \
+ SDE_PORTB_HOTPLUG | \
+ SDE_PORTC_HOTPLUG | \
+ SDE_PORTD_HOTPLUG)
+#define SDE_TRANSB_CRC_DONE (1 << 5)
+#define SDE_TRANSB_CRC_ERR (1 << 4)
+#define SDE_TRANSB_FIFO_UNDER (1 << 3)
+#define SDE_TRANSA_CRC_DONE (1 << 2)
+#define SDE_TRANSA_CRC_ERR (1 << 1)
+#define SDE_TRANSA_FIFO_UNDER (1 << 0)
+#define SDE_TRANS_MASK (0x3f)
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT (1UL << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7UL << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT (1 << 18)
+#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
+ SDE_SDVOB_HOTPLUG_CPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_ERROR_CPT (1 << 16)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
+
+#define SDEISR 0xc4000
+#define SDEIMR 0xc4004
+#define SDEIIR 0xc4008
+#define SDEIER 0xc400c
+
+#define SERR_INT 0xc4040
+#define SERR_INT_POISON (1UL<<31)
+#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
+#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
+#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG 0xc4030
+#define PORTD_HOTPLUG_ENABLE (1 << 20)
+#define PORTD_PULSE_DURATION_2ms (0)
+#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
+#define PORTD_PULSE_DURATION_6ms (2 << 18)
+#define PORTD_PULSE_DURATION_100ms (3 << 18)
+#define PORTD_PULSE_DURATION_MASK (3 << 18)
+#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
+#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define PORTC_PULSE_DURATION_2ms (0)
+#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
+#define PORTC_PULSE_DURATION_6ms (2 << 10)
+#define PORTC_PULSE_DURATION_100ms (3 << 10)
+#define PORTC_PULSE_DURATION_MASK (3 << 10)
+#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
+#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define PORTB_PULSE_DURATION_2ms (0)
+#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
+#define PORTB_PULSE_DURATION_6ms (2 << 2)
+#define PORTB_PULSE_DURATION_100ms (3 << 2)
+#define PORTB_PULSE_DURATION_MASK (3 << 2)
+#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+
+#define PCH_GPIOA 0xc5010
+#define PCH_GPIOB 0xc5014
+#define PCH_GPIOC 0xc5018
+#define PCH_GPIOD 0xc501c
+#define PCH_GPIOE 0xc5020
+#define PCH_GPIOF 0xc5024
+
+#define PCH_GMBUS0 0xc5100
+#define PCH_GMBUS1 0xc5104
+#define PCH_GMBUS2 0xc5108
+#define PCH_GMBUS3 0xc510c
+#define PCH_GMBUS4 0xc5110
+#define PCH_GMBUS5 0xc5120
+
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+
+#define _PCH_FPA0 0xc6040
+#define FP_CB_TUNE (0x3<<22)
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB0 0xc6048
+#define _PCH_FPB1 0xc604c
+#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
+
+#define PCH_DPLL_TEST 0xc606c
+
+#define PCH_DREF_CONTROL 0xC6200
+#define DREF_CONTROL_MASK 0x7fc3
+#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
+#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
+#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
+#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
+#define DREF_SSC_SOURCE_DISABLE (0<<11)
+#define DREF_SSC_SOURCE_ENABLE (2<<11)
+#define DREF_SSC_SOURCE_MASK (3<<11)
+#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
+#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
+#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
+#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
+#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
+#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
+#define DREF_SSC4_DOWNSPREAD (0<<6)
+#define DREF_SSC4_CENTERSPREAD (1<<6)
+#define DREF_SSC1_DISABLE (0<<1)
+#define DREF_SSC1_ENABLE (1<<1)
+#define DREF_SSC4_DISABLE (0)
+#define DREF_SSC4_ENABLE (1)
+
+#define PCH_RAWCLK_FREQ 0xc6204
+#define FDL_TP1_TIMER_SHIFT 12
+#define FDL_TP1_TIMER_MASK (3<<12)
+#define FDL_TP2_TIMER_SHIFT 10
+#define FDL_TP2_TIMER_MASK (3<<10)
+#define RAWCLK_FREQ_MASK 0x3ff
+
+#define PCH_DPLL_TMR_CFG 0xc6208
+
+#define PCH_SSC4_PARMS 0xc6210
+#define PCH_SSC4_AUX_PARMS 0xc6214
+
+#define PCH_DPLL_SEL 0xc7000
+#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4))
+#define TRANS_DPLLA_SEL(pipe) 0
+#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3))
+
+/* transcoder */
+
+#define _PCH_TRANS_HTOTAL_A 0xe0000
+#define TRANS_HTOTAL_SHIFT 16
+#define TRANS_HACTIVE_SHIFT 0
+#define _PCH_TRANS_HBLANK_A 0xe0004
+#define TRANS_HBLANK_END_SHIFT 16
+#define TRANS_HBLANK_START_SHIFT 0
+#define _PCH_TRANS_HSYNC_A 0xe0008
+#define TRANS_HSYNC_END_SHIFT 16
+#define TRANS_HSYNC_START_SHIFT 0
+#define _PCH_TRANS_VTOTAL_A 0xe000c
+#define TRANS_VTOTAL_SHIFT 16
+#define TRANS_VACTIVE_SHIFT 0
+#define _PCH_TRANS_VBLANK_A 0xe0010
+#define TRANS_VBLANK_END_SHIFT 16
+#define TRANS_VBLANK_START_SHIFT 0
+#define _PCH_TRANS_VSYNC_A 0xe0014
+#define TRANS_VSYNC_END_SHIFT 16
+#define TRANS_VSYNC_START_SHIFT 0
+#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
+
+#define _PCH_TRANSA_DATA_M1 0xe0030
+#define _PCH_TRANSA_DATA_N1 0xe0034
+#define _PCH_TRANSA_DATA_M2 0xe0038
+#define _PCH_TRANSA_DATA_N2 0xe003c
+#define _PCH_TRANSA_LINK_M1 0xe0040
+#define _PCH_TRANSA_LINK_N1 0xe0044
+#define _PCH_TRANSA_LINK_M2 0xe0048
+#define _PCH_TRANSA_LINK_N2 0xe004c
+
+/* Per-transcoder DIP controls */
+
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_GCP_A 0xe0210
+
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define _VIDEO_DIP_GCP_B 0xe1210
+
+#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+
+#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
+#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
+
+#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
+#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A 0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define HSW_VIDEO_DIP_GCP_A 0x60210
+
+#define HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+#define HSW_VIDEO_DIP_GCP_B 0x61210
+
+#define HSW_TVIDEO_DIP_CTL(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
+
+#define _PCH_TRANS_HTOTAL_B 0xe1000
+#define _PCH_TRANS_HBLANK_B 0xe1004
+#define _PCH_TRANS_HSYNC_B 0xe1008
+#define _PCH_TRANS_VTOTAL_B 0xe100c
+#define _PCH_TRANS_VBLANK_B 0xe1010
+#define _PCH_TRANS_VSYNC_B 0xe1014
+#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
+
+#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
+ _PCH_TRANS_VSYNCSHIFT_B)
+
+#define _PCH_TRANSB_DATA_M1 0xe1030
+#define _PCH_TRANSB_DATA_N1 0xe1034
+#define _PCH_TRANSB_DATA_M2 0xe1038
+#define _PCH_TRANSB_DATA_N2 0xe103c
+#define _PCH_TRANSB_LINK_M1 0xe1040
+#define _PCH_TRANSB_LINK_N1 0xe1044
+#define _PCH_TRANSB_LINK_M2 0xe1048
+#define _PCH_TRANSB_LINK_N2 0xe104c
+
+#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+
+#define _PCH_TRANSACONF 0xf0008
+#define _PCH_TRANSBCONF 0xf1008
+#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF _PCH_TRANSACONF /* lpt has only one transcoder */
+#define TRANS_DISABLE (0<<31)
+#define TRANS_ENABLE (1UL<<31) /* OSOL_i915 */
+#define TRANS_STATE_MASK (1<<30)
+#define TRANS_STATE_DISABLE (0<<30)
+#define TRANS_STATE_ENABLE (1<<30)
+#define TRANS_FSYNC_DELAY_HB1 (0<<27)
+#define TRANS_FSYNC_DELAY_HB2 (1<<27)
+#define TRANS_FSYNC_DELAY_HB3 (2<<27)
+#define TRANS_FSYNC_DELAY_HB4 (3<<27)
+#define TRANS_INTERLACE_MASK (7<<21)
+#define TRANS_PROGRESSIVE (0<<21)
+#define TRANS_INTERLACED (3<<21)
+#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
+#define TRANS_8BPC (0<<5)
+#define TRANS_10BPC (1<<5)
+#define TRANS_6BPC (2<<5)
+#define TRANS_12BPC (3<<5)
+
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1UL<<31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25)
+
+#define SOUTH_CHICKEN1 0xc2000
+#define FDIA_PHASE_SYNC_SHIFT_OVR 19
+#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
+#define SOUTH_CHICKEN2 0xc2004
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+
+#define _FDI_RXA_CHICKEN 0xc200c
+#define _FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D 0xc2020
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
+
+/* CPU: FDI_TX */
+#define _FDI_TXA_CTL 0x60100
+#define _FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
+#define FDI_TX_DISABLE (0<<31)
+#define FDI_TX_ENABLE (1UL<<31) /* OSOL_i915 */
+#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
+#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
+#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
+#define FDI_LINK_TRAIN_NONE (3<<28)
+#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
+#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
+#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+   SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
+#define FDI_DP_PORT_WIDTH_SHIFT 19
+#define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT)
+#define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
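+/* Example: FDI_DP_PORT_WIDTH(4) encodes a 4-lane link as (3 << 19). */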
+#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
+/* Ironlake: hardwired to 1 */
+#define FDI_TX_PLL_ENABLE (1<<14)
+
+/* Ivybridge has different bits for lolz */
+#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8)
+#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
+
+/* both Tx and Rx */
+#define FDI_COMPOSITE_SYNC (1<<11)
+#define FDI_LINK_TRAIN_AUTO (1<<10)
+#define FDI_SCRAMBLING_ENABLE (0<<7)
+#define FDI_SCRAMBLING_DISABLE (1<<7)
+
+/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
+#define _FDI_RXA_CTL 0xf000c
+#define _FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
+#define FDI_RX_ENABLE (1UL<<31)
+/* training pattern and DP port width bits are the same as FDI_TX */
+#define FDI_FS_ERRC_ENABLE (1<<27)
+#define FDI_FE_ERRC_ENABLE (1<<26)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
+#define FDI_8BPC (0<<16)
+#define FDI_10BPC (1<<16)
+#define FDI_6BPC (2<<16)
+#define FDI_12BPC (3<<16)
+#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
+#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
+#define FDI_RX_PLL_ENABLE (1<<13)
+#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
+#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
+#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
+#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
+#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
+#define FDI_PCDCLK (1<<4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1<<10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
+#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
+#define _FDI_RXA_TUSIZE1 0xf0030
+#define _FDI_RXA_TUSIZE2 0xf0038
+#define _FDI_RXB_TUSIZE1 0xf1030
+#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
+
+/* FDI_RX interrupt register format */
+#define FDI_RX_INTER_LANE_ALIGN (1<<10)
+#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
+#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
+#define FDI_RX_FS_CODE_ERR (1<<6)
+#define FDI_RX_FE_CODE_ERR (1<<5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
+#define FDI_RX_HDCP_LINK_FAIL (1<<3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
+
+#define _FDI_RXA_IIR 0xf0014
+#define _FDI_RXA_IMR 0xf0018
+#define _FDI_RXB_IIR 0xf1014
+#define _FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
+
+#define FDI_PLL_CTL_1 0xfe000
+#define FDI_PLL_CTL_2 0xfe004
+
+#define PCH_LVDS 0xe1180
+#define LVDS_DETECTED (1 << 1)
+
+/* vlv has 2 sets of panel control regs. */
+#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
+#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
+#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
+#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
+#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
+
+#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
+#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
+#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
+#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
+#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
+
+#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
+#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
+#define VLV_PIPE_PP_ON_DELAYS(pipe) \
+ _PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
+#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
+ _PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
+#define VLV_PIPE_PP_DIVISOR(pipe) \
+ _PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
+
+#define PCH_PP_STATUS 0xc7200
+#define PCH_PP_CONTROL 0xc7204
+#define PANEL_UNLOCK_REGS (0xabcdUL << 16)
+#define PANEL_UNLOCK_MASK (0xffffUL << 16)
+#define EDP_FORCE_VDD (1 << 3)
+#define EDP_BLC_ENABLE (1 << 2)
+#define PANEL_POWER_RESET (1 << 1)
+#define PANEL_POWER_OFF (0 << 0)
+#define PANEL_POWER_ON (1 << 0)
+#define PCH_PP_ON_DELAYS 0xc7208
+#define PANEL_PORT_SELECT_MASK (3UL << 30)
+#define PANEL_PORT_SELECT_LVDS (0 << 30)
+#define PANEL_PORT_SELECT_DPA (1 << 30)
+#define EDP_PANEL (1 << 30)
+#define PANEL_PORT_SELECT_DPC (2UL << 30)
+#define PANEL_PORT_SELECT_DPD (3UL << 30)
+#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT 16
+#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+
+#define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_PORT_SELECT_MASK (0x3UL << 30)
+#define PANEL_POWER_PORT_LVDS (0 << 30)
+#define PANEL_POWER_PORT_DP_A (1 << 30)
+#define PANEL_POWER_PORT_DP_C (2UL << 30)
+#define PANEL_POWER_PORT_DP_D (3UL << 30)
+#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT 16
+#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+
+#define PCH_PP_DIVISOR 0xc7210
+#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
+#define PP_REFERENCE_DIVIDER_SHIFT 8
+#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
+#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
+
+#define PCH_DP_B 0xe4100
+#define PCH_DPB_AUX_CH_CTL 0xe4110
+#define PCH_DPB_AUX_CH_DATA1 0xe4114
+#define PCH_DPB_AUX_CH_DATA2 0xe4118
+#define PCH_DPB_AUX_CH_DATA3 0xe411c
+#define PCH_DPB_AUX_CH_DATA4 0xe4120
+#define PCH_DPB_AUX_CH_DATA5 0xe4124
+
+#define PCH_DP_C 0xe4200
+#define PCH_DPC_AUX_CH_CTL 0xe4210
+#define PCH_DPC_AUX_CH_DATA1 0xe4214
+#define PCH_DPC_AUX_CH_DATA2 0xe4218
+#define PCH_DPC_AUX_CH_DATA3 0xe421c
+#define PCH_DPC_AUX_CH_DATA4 0xe4220
+#define PCH_DPC_AUX_CH_DATA5 0xe4224
+
+#define PCH_DP_D 0xe4300
+#define PCH_DPD_AUX_CH_CTL 0xe4310
+#define PCH_DPD_AUX_CH_DATA1 0xe4314
+#define PCH_DPD_AUX_CH_DATA2 0xe4318
+#define PCH_DPD_AUX_CH_DATA3 0xe431c
+#define PCH_DPD_AUX_CH_DATA4 0xe4320
+#define PCH_DPD_AUX_CH_DATA5 0xe4324
+
+/* CPT */
+#define PORT_TRANS_A_SEL_CPT 0
+#define PORT_TRANS_B_SEL_CPT (1<<29)
+#define PORT_TRANS_C_SEL_CPT (2<<29)
+#define PORT_TRANS_SEL_MASK (3<<29)
+#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
+#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
+#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
+
+#define TRANS_DP_CTL_A 0xe0300
+#define TRANS_DP_CTL_B 0xe1300
+#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
+#define TRANS_DP_OUTPUT_ENABLE (1UL<<31)
+#define TRANS_DP_PORT_SEL_B (0<<29)
+#define TRANS_DP_PORT_SEL_C (1<<29)
+#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_NONE (3<<29)
+#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_ENH_FRAMING (1UL<<18)
+#define TRANS_DP_8BPC (0<<9)
+#define TRANS_DP_10BPC (1<<9)
+#define TRANS_DP_6BPC (2<<9)
+#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_VSYNC_ACTIVE_LOW 0
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+#define TRANS_DP_SYNC_MASK (3<<3)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22)
+#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
+#define FORCEWAKE 0xA18C
+#define FORCEWAKE_VLV 0x1300b0
+#define FORCEWAKE_ACK_VLV 0x1300b4
+#define FORCEWAKE_MEDIA_VLV 0x1300b8
+#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc
+#define FORCEWAKE_ACK_HSW 0x130044
+#define FORCEWAKE_ACK 0x130090
+#define VLV_GTLC_WAKE_CTRL 0x130090
+#define VLV_GTLC_PW_STATUS 0x130094
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_KERNEL 0x1
+#define FORCEWAKE_USER 0x2
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
+
+#define GTFIFODBG 0x120000
+#define GT_FIFO_CPU_ERROR_MASK 7
+#define GT_FIFO_OVFERR (1<<2)
+#define GT_FIFO_IAWRERR (1<<1)
+#define GT_FIFO_IARDERR (1<<0)
+
+#define GT_FIFO_FREE_ENTRIES 0x120008
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+
+#define GEN6_UCGCTL1 0x9400
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
+
+#define GEN6_UCGCTL2 0x9404
+# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
+# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
+#define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
+#define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+#define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
+#define GEN7_UCGCTL4 0x940c
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+
+#define GEN6_RPNSWREQ 0xA008
+#define GEN6_TURBO_DISABLE (1UL<<31)
+#define GEN6_FREQUENCY(x) ((x)<<25)
+#define HSW_FREQUENCY(x) ((x)<<24)
+#define GEN6_OFFSET(x) ((x)<<19)
+#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_RC_VIDEO_FREQ 0xA00C
+#define GEN6_RC_CONTROL 0xA090
+#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
+#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
+#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define GEN7_RC_CTL_TO_MODE (1<<28)
+#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
+#define GEN6_RC_CTL_HW_ENABLE (1UL<<31)
+#define GEN6_RP_DOWN_TIMEOUT 0xA010
+#define GEN6_RP_INTERRUPT_LIMITS 0xA014
+#define GEN6_RPSTAT1 0xA01C
+#define GEN6_CAGF_SHIFT 8
+#define HSW_CAGF_SHIFT 7
+#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
+#define GEN6_RP_CONTROL 0xA024
+#define GEN6_RP_MEDIA_TURBO (1<<11)
+#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
+#define GEN6_RP_MEDIA_HW_MODE (1<<9)
+#define GEN6_RP_MEDIA_SW_MODE (0<<9)
+#define GEN6_RP_MEDIA_IS_GFX (1<<8)
+#define GEN6_RP_ENABLE (1<<7)
+#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
+#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
+#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
+#define GEN6_RP_UP_THRESHOLD 0xA02C
+#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_CUR_UP_EI 0xA050
+#define GEN6_CURICONT_MASK 0xffffff
+#define GEN6_RP_CUR_UP 0xA054
+#define GEN6_CURBSYTAVG_MASK 0xffffff
+#define GEN6_RP_PREV_UP 0xA058
+#define GEN6_RP_CUR_DOWN_EI 0xA05C
+#define GEN6_CURIAVG_MASK 0xffffff
+#define GEN6_RP_CUR_DOWN 0xA060
+#define GEN6_RP_PREV_DOWN 0xA064
+#define GEN6_RP_UP_EI 0xA068
+#define GEN6_RP_DOWN_EI 0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RC_STATE 0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
+#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RC1e_THRESHOLD 0xA0B4
+#define GEN6_RC6_THRESHOLD 0xA0B8
+#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define GEN6_RC6pp_THRESHOLD 0xA0C0
+#define GEN6_PMINTRMSK 0xA168
+
+#define GEN6_PMISR 0x44020
+#define GEN6_PMIMR 0x44024 /* rps_lock */
+#define GEN6_PMIIR 0x44028
+#define GEN6_PMIER 0x4402C
+#define GEN6_PM_MBOX_EVENT (1<<25)
+#define GEN6_PM_THERMAL_EVENT (1<<24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
+#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
+ GEN6_PM_RP_DOWN_THRESHOLD | \
+ GEN6_PM_RP_DOWN_TIMEOUT)
+
+#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define GEN6_GT_GFX_RC6 0x138108
+#define GEN6_GT_GFX_RC6p 0x13810C
+#define GEN6_GT_GFX_RC6pp 0x138110
+
+#define GEN6_PCODE_MAILBOX 0x138124
+#define GEN6_PCODE_READY (1UL<<31)
+#define GEN6_READ_OC_PARAMS 0xc
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
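+/* e.g. GEN6_ENCODE_RC6_VID(450) = (450 - 245) / 5 = 41; GEN6_DECODE_RC6_VID(41) = 450 */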
+#define GEN6_PCODE_DATA 0x138128
+#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
+#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
+
+#define GEN6_GT_CORE_STATUS 0x138060
+#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
+#define GEN7_MISCCPCTL (0x9424)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+
+/* IVYBRIDGE DPF */
+#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
+#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
+#define GEN7_PARITY_ERROR_VALID (1<<13)
+#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
+#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define GEN7_PARITY_ERROR_ROW(reg) \
+ ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+#define GEN7_PARITY_ERROR_BANK(reg) \
+ ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+#define GEN7_PARITY_ERROR_SUBBANK(reg) \
+ ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define GEN7_L3CDERRST1_ENABLE (1<<7)
+
+#define GEN7_L3LOG_BASE 0xB070
+#define GEN7_L3LOG_SIZE 0x80
+
+#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
+#define GEN7_MAX_PS_THREAD_DEP (8UL<<12)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN7_ROW_CHICKEN2 0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+#define DOP_CLOCK_GATING_DISABLE (1<<0)
+
+#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
+
+#define G4X_AUD_CNTL_ST 0x620B4
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
+#define G4X_HDMIW_HDMIEDID 0x6210C
+
+#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ IBX_HDMIW_HDMIEDID_A, \
+ IBX_HDMIW_HDMIEDID_B)
+#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ IBX_AUD_CNTL_ST_A, \
+ IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
+#define IBX_ELD_ADDRESS (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 0xE20C0
+#define IBX_ELD_VALIDB (1 << 0)
+#define IBX_CP_READYB (1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+ CPT_HDMIW_HDMIEDID_A, \
+ CPT_HDMIW_HDMIEDID_B)
+#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+ CPT_AUD_CNTL_ST_A, \
+ CPT_AUD_CNTL_ST_B)
+#define CPT_AUD_CNTRL_ST2 0xE50C0
+
+/* These are the 4 32-bit write offset registers, one per stream
+ * output buffer. Each determines the offset from its
+ * 3DSTATE_SO_BUFFER that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
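+/* e.g. GEN7_SO_WRITE_OFFSET(2) expands to 0x5280 + 2 * 4 = 0x5288 */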
+
+#define IBX_AUD_CONFIG_A 0xe2000
+#define IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+ IBX_AUD_CONFIG_A, \
+ IBX_AUD_CONFIG_B)
+#define CPT_AUD_CONFIG_A 0xe5000
+#define CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+ CPT_AUD_CONFIG_A, \
+ CPT_AUD_CONFIG_B)
+#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
+#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
+#define AUD_CONFIG_UPPER_N_SHIFT 20
+#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
+#define AUD_CONFIG_LOWER_N_SHIFT 4
+#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
+#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+
+/* HSW Audio */
+#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
+#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
+#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
+ HSW_AUD_CONFIG_A, \
+ HSW_AUD_CONFIG_B)
+
+#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
+#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
+#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_MISC_CTRL_A, \
+ HSW_AUD_MISC_CTRL_B)
+
+#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+ HSW_AUD_DIP_ELD_CTRL_ST_A, \
+ HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
+#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
+#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+ HSW_AUD_DIG_CNVT_1, \
+ HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define HSW_AUD_EDID_DATA_A 0x65050
+#define HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+ HSW_AUD_EDID_DATA_A, \
+ HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
+#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
+#define AUDIO_INACTIVE_C (1<<11)
+#define AUDIO_INACTIVE_B (1<<7)
+#define AUDIO_INACTIVE_A (1<<3)
+#define AUDIO_OUTPUT_ENABLE_A (1<<2)
+#define AUDIO_OUTPUT_ENABLE_B (1<<6)
+#define AUDIO_OUTPUT_ENABLE_C (1<<10)
+#define AUDIO_ELD_VALID_A (1<<0)
+#define AUDIO_ELD_VALID_B (1<<4)
+#define AUDIO_ELD_VALID_C (1<<8)
+#define AUDIO_CP_READY_A (1<<1)
+#define AUDIO_CP_READY_B (1<<5)
+#define AUDIO_CP_READY_C (1<<9)
+
+/* HSW Power Wells */
+#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
+#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
+#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
+#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
+#define HSW_PWR_WELL_ENABLE (1UL<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1UL<<31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
+
+/* Per-pipe DDI Function Control */
+#define TRANS_DDI_FUNC_CTL_A 0x60400
+#define TRANS_DDI_FUNC_CTL_B 0x61400
+#define TRANS_DDI_FUNC_CTL_C 0x62400
+#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+ TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_ENABLE (1UL<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
+#define TRANS_DDI_PORT_NONE (0<<28)
+#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
+#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
+#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
+#define TRANS_DDI_BPC_MASK (7<<20)
+#define TRANS_DDI_BPC_8 (0<<20)
+#define TRANS_DDI_BPC_10 (1<<20)
+#define TRANS_DDI_BPC_6 (2<<20)
+#define TRANS_DDI_BPC_12 (3<<20)
+#define TRANS_DDI_PVSYNC (1<<17)
+#define TRANS_DDI_PHSYNC (1<<16)
+#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_BFI_ENABLE (1<<4)
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A 0x64040
+#define DP_TP_CTL_B 0x64140
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1UL<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A 0x64044
+#define DP_TP_STATUS_B 0x64144
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A 0x64000
+#define DDI_BUF_CTL_B 0x64100
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1UL<<31)
+#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_PORT_REVERSAL (1<<16)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
+#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
+#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A 0x64E00
+#define DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
+
+/* Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which contains the register offset; and SBI_DATA,
+ * which contains the payload */
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
+#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
+#define SBI_CTL_OP_CRRD (0x6<<8)
+#define SBI_CTL_OP_CRWR (0x7<<8)
+#define SBI_RESPONSE_FAIL (0x1<<1)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
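+
+/* A minimal sketch of the usual SBI read sequence (illustrative only;
+ * "reg" and "value" are placeholders, not names from this file):
+ *
+ *	I915_WRITE(SBI_ADDR, (u32)reg << 16);
+ *	I915_WRITE(SBI_CTL_STAT, SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY);
+ *	wait until (I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0;
+ *	value = I915_READ(SBI_DATA);
+ */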
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
+#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
+
+/* SPLL */
+#define SPLL_CTL 0x46020
+#define SPLL_PLL_ENABLE (1UL<<31)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1UL<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
+#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A 0x46100
+#define PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
+#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
+#define PORT_CLK_SEL_LCPLL_810 (2<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL1 (4UL<<29)
+#define PORT_CLK_SEL_WRPLL2 (5UL<<29)
+#define PORT_CLK_SEL_NONE (7UL<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A 0x46140
+#define TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0<<29)
+#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
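+/* the +1 keeps the encoding 0x0 free to mean TRANS_CLK_SEL_DISABLED */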
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+ _TRANSB_MSA_MISC)
+#define TRANS_MSA_SYNC_CLK (1<<0)
+#define TRANS_MSA_6_BPC (0<<5)
+#define TRANS_MSA_8_BPC (1<<5)
+#define TRANS_MSA_10_BPC (2<<5)
+#define TRANS_MSA_12_BPC (3<<5)
+#define TRANS_MSA_16_BPC (4<<5)
+
+/* LCPLL Control */
+#define LCPLL_CTL 0x130040
+#define LCPLL_PLL_DISABLE (1UL<<31)
+#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CLK_FREQ_MASK (3<<26)
+#define LCPLL_CLK_FREQ_450 (0<<26)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_CD_SOURCE_FCLK (1<<21)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A 0x45270
+#define PIPE_WM_LINETIME_B 0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
+#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
+#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+
+#define WM_MISC 0x45260
+#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
+
+#define WM_DBG 0x45280
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
+#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
+#define WM_DBG_DISALLOW_SPRITE (1<<2)
+
+/* pipe CSC */
+#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
+#define _PIPE_A_CSC_COEFF_BY 0x49014
+#define _PIPE_A_CSC_COEFF_RU_GU 0x49018
+#define _PIPE_A_CSC_COEFF_BU 0x4901c
+#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
+#define _PIPE_A_CSC_COEFF_BV 0x49024
+#define _PIPE_A_CSC_MODE 0x49028
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define _PIPE_A_CSC_PREOFF_HI 0x49030
+#define _PIPE_A_CSC_PREOFF_ME 0x49034
+#define _PIPE_A_CSC_PREOFF_LO 0x49038
+#define _PIPE_A_CSC_POSTOFF_HI 0x49040
+#define _PIPE_A_CSC_POSTOFF_ME 0x49044
+#define _PIPE_A_CSC_POSTOFF_LO 0x49048
+
+#define _PIPE_B_CSC_COEFF_RY_GY 0x49110
+#define _PIPE_B_CSC_COEFF_BY 0x49114
+#define _PIPE_B_CSC_COEFF_RU_GU 0x49118
+#define _PIPE_B_CSC_COEFF_BU 0x4911c
+#define _PIPE_B_CSC_COEFF_RV_GV 0x49120
+#define _PIPE_B_CSC_COEFF_BV 0x49124
+#define _PIPE_B_CSC_MODE 0x49128
+#define _PIPE_B_CSC_PREOFF_HI 0x49130
+#define _PIPE_B_CSC_PREOFF_ME 0x49134
+#define _PIPE_B_CSC_PREOFF_LO 0x49138
+#define _PIPE_B_CSC_POSTOFF_HI 0x49140
+#define _PIPE_B_CSC_POSTOFF_ME 0x49144
+#define _PIPE_B_CSC_POSTOFF_LO 0x49148
+
+#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
+#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
+#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
+#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
+#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
+#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
+#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
+#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
+#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
+#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
+#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
+#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
+#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+
+/* For gpu top */
+#define _RING_TAIL 0x00
+#define _RING_HEAD 0x04
+#define _RING_LEN 0x0C
+
+#define INST_DONE 0x2090
+# define IDCT_DONE (1UL << 30)
+# define IQ_DONE (1 << 29)
+# define PR_DONE (1 << 28)
+# define VLD_DONE (1 << 27)
+# define IP_DONE (1 << 26)
+# define FBC_DONE (1 << 25)
+# define BINNER_DONE (1 << 24)
+# define SF_DONE (1 << 23)
+# define SE_DONE (1 << 22)
+# define WM_DONE (1 << 21)
+# define IZ_DONE (1 << 20)
+# define PERSPECTIVE_INTERP_DONE (1 << 19)
+# define DISPATCHER_DONE (1 << 18)
+# define PROJECTION_DONE (1 << 17)
+# define DEPENDENT_ADDRESS_DONE (1 << 16)
+# define QUAD_CACHE_DONE (1 << 15)
+# define TEXTURE_FETCH_DONE (1 << 14)
+# define TEXTURE_DECOMPRESS_DONE (1 << 13)
+# define SAMPLER_CACHE_DONE (1 << 12)
+# define FILTER_DONE (1 << 11)
+# define BYPASS_FIFO_DONE (1 << 10)
+# define PS_DONE (1 << 9)
+# define CC_DONE (1 << 8)
+# define MAP_FILTER_DONE (1 << 7)
+# define MAP_L2_IDLE (1 << 6)
+# define RING_2_ENABLE (1 << 2)
+# define RING_1_ENABLE (1 << 1)
+# define RING_0_ENABLE (1 << 0)
+
+# define I830_GMBUS_DONE (1 << 26)
+# define I830_FBC_DONE (1 << 25)
+# define I830_BINNER_DONE (1 << 24)
+# define I830_MPEG_DONE (1 << 23)
+# define I830_MECO_DONE (1 << 22)
+# define I830_MCD_DONE (1 << 21)
+# define I830_MCSTP_DONE (1 << 20)
+# define I830_CC_DONE (1 << 19)
+# define I830_DG_DONE (1 << 18)
+# define I830_DCMP_DONE (1 << 17)
+# define I830_FTCH_DONE (1 << 16)
+# define I830_IT_DONE (1 << 15)
+# define I830_MG_DONE (1 << 14)
+# define I830_MEC_DONE (1 << 13)
+# define I830_PC_DONE (1 << 12)
+# define I830_QCC_DONE (1 << 11)
+# define I830_TB_DONE (1 << 10)
+# define I830_WM_DONE (1 << 9)
+# define I830_EF_DONE (1 << 8)
+# define I830_BLITTER_DONE (1 << 7)
+# define I830_MAP_L2_DONE (1 << 6)
+# define I830_SECONDARY_RING_3_DONE (1 << 5)
+# define I830_SECONDARY_RING_2_DONE (1 << 4)
+# define I830_SECONDARY_RING_1_DONE (1 << 3)
+# define I830_SECONDARY_RING_0_DONE (1 << 2)
+# define I830_PRIMARY_RING_1_DONE (1 << 1)
+# define I830_PRIMARY_RING_0_DONE (1 << 0)
+
+#define INST_DONE_I965 0x206c
+# define I965_ROW_0_EU_0_DONE (1UL << 31)
+# define I965_ROW_0_EU_1_DONE (1UL << 30)
+# define I965_ROW_0_EU_2_DONE (1 << 29)
+# define I965_ROW_0_EU_3_DONE (1 << 28)
+# define I965_ROW_1_EU_0_DONE (1 << 27)
+# define I965_ROW_1_EU_1_DONE (1 << 26)
+# define I965_ROW_1_EU_2_DONE (1 << 25)
+# define I965_ROW_1_EU_3_DONE (1 << 24)
+# define I965_SF_DONE (1 << 23)
+# define I965_SE_DONE (1 << 22)
+# define I965_WM_DONE (1 << 21)
+# define I965_DISPATCHER_DONE (1 << 18)
+# define I965_PROJECTION_DONE (1 << 17)
+# define I965_DG_DONE (1 << 16)
+# define I965_QUAD_CACHE_DONE (1 << 15)
+# define I965_TEXTURE_FETCH_DONE (1 << 14)
+# define I965_TEXTURE_DECOMPRESS_DONE (1 << 13)
+# define I965_SAMPLER_CACHE_DONE (1 << 12)
+# define I965_FILTER_DONE (1 << 11)
+# define I965_BYPASS_DONE (1 << 10)
+# define I965_PS_DONE (1 << 9)
+# define I965_CC_DONE (1 << 8)
+# define I965_MAP_FILTER_DONE (1 << 7)
+# define I965_MAP_L2_IDLE (1 << 6)
+# define I965_MA_ROW_0_DONE (1 << 5)
+# define I965_MA_ROW_1_DONE (1 << 4)
+# define I965_IC_ROW_0_DONE (1 << 3)
+# define I965_IC_ROW_1_DONE (1 << 2)
+# define I965_CP_DONE (1 << 1)
+# define I965_RING_0_ENABLE (1 << 0)
+
+# define ILK_ROW_0_EU_0_DONE (1UL << 31)
+# define ILK_ROW_0_EU_1_DONE (1UL << 30)
+# define ILK_ROW_0_EU_2_DONE (1 << 29)
+# define ILK_ROW_0_EU_3_DONE (1 << 28)
+# define ILK_ROW_1_EU_0_DONE (1 << 27)
+# define ILK_ROW_1_EU_1_DONE (1 << 26)
+# define ILK_ROW_1_EU_2_DONE (1 << 25)
+# define ILK_ROW_1_EU_3_DONE (1 << 24)
+# define ILK_ROW_2_EU_0_DONE (1 << 23)
+# define ILK_ROW_2_EU_1_DONE (1 << 22)
+# define ILK_ROW_2_EU_2_DONE (1 << 21)
+# define ILK_ROW_2_EU_3_DONE (1 << 20)
+# define ILK_VCP_DONE (1 << 19)
+# define ILK_ROW_0_MATH_DONE (1 << 18)
+# define ILK_ROW_1_MATH_DONE (1 << 17)
+# define ILK_ROW_2_MATH_DONE (1 << 16)
+# define ILK_VC1_DONE (1 << 15)
+# define ILK_ROW_0_MA_DONE (1 << 14)
+# define ILK_ROW_1_MA_DONE (1 << 13)
+# define ILK_ROW_2_MA_DONE (1 << 12)
+# define ILK_ROW_0_ISC_DONE (1 << 11)
+# define ILK_ROW_1_ISC_DONE (1 << 10)
+# define ILK_ROW_2_ISC_DONE (1 << 9)
+# define ILK_VFE_DONE (1 << 8)
+# define ILK_TD_DONE (1 << 7)
+# define ILK_SVTS_DONE (1 << 6)
+# define ILK_TS_DONE (1 << 5)
+# define ILK_GW_DONE (1 << 4)
+# define ILK_AI_DONE (1 << 3)
+# define ILK_AC_DONE (1 << 2)
+# define ILK_AM_DONE (1 << 1)
+
+#define GEN6_INSTDONE_1 0x206c
+# define GEN6_MA_3_DONE (1UL << 31)
+# define GEN6_EU_32_DONE (1UL << 30)
+# define GEN6_EU_31_DONE (1 << 29)
+# define GEN6_EU_30_DONE (1 << 28)
+# define GEN6_MA_2_DONE (1 << 27)
+# define GEN6_EU_22_DONE (1 << 26)
+# define GEN6_EU_21_DONE (1 << 25)
+# define GEN6_EU_20_DONE (1 << 24)
+# define GEN6_MA_1_DONE (1 << 23)
+# define GEN6_EU_12_DONE (1 << 22)
+# define GEN6_EU_11_DONE (1 << 21)
+# define GEN6_EU_10_DONE (1 << 20)
+# define GEN6_MA_0_DONE (1 << 19)
+# define GEN6_EU_02_DONE (1 << 18)
+# define GEN6_EU_01_DONE (1 << 17)
+# define GEN6_EU_00_DONE (1 << 16)
+# define GEN6_IC_3_DONE (1 << 15)
+# define GEN6_IC_2_DONE (1 << 14)
+# define GEN6_IC_1_DONE (1 << 13)
+# define GEN6_IC_0_DONE (1 << 12)
+# define GEN6_ISC_10_DONE (1 << 11)
+# define GEN6_ISC_32_DONE (1 << 10)
+# define GEN6_VSC_DONE (1 << 9)
+# define GEN6_IEF_DONE (1 << 8)
+# define GEN6_VFE_DONE (1 << 7)
+# define GEN6_TD_DONE (1 << 6)
+# define GEN6_TS_DONE (1 << 4)
+# define GEN6_GW_DONE (1 << 3)
+# define GEN6_HIZ_DONE (1 << 2)
+# define GEN6_AVS_DONE (1 << 1)
+
+
+/* Current primary/secondary DMA fetch addresses */
+#define DMA_FADD_P 0x2078
+#define DMA_FADD_S 0x20d4
+#define INST_DONE_1 0x207c
+# define I965_GW_CS_DONE_CR (1 << 19)
+# define I965_SVSM_CS_DONE_CR (1 << 18)
+# define I965_SVDW_CS_DONE_CR (1 << 17)
+# define I965_SVDR_CS_DONE_CR (1 << 16)
+# define I965_SVRW_CS_DONE_CR (1 << 15)
+# define I965_SVRR_CS_DONE_CR (1 << 14)
+# define I965_SVTW_CS_DONE_CR (1 << 13)
+# define I965_MASM_CS_DONE_CR (1 << 12)
+# define I965_MASF_CS_DONE_CR (1 << 11)
+# define I965_MAW_CS_DONE_CR (1 << 10)
+# define I965_EM1_CS_DONE_CR (1 << 9)
+# define I965_EM0_CS_DONE_CR (1 << 8)
+# define I965_UC1_CS_DONE (1 << 7)
+# define I965_UC0_CS_DONE (1 << 6)
+# define I965_URB_CS_DONE (1 << 5)
+# define I965_ISC_CS_DONE (1 << 4)
+# define I965_CL_CS_DONE (1 << 3)
+# define I965_GS_CS_DONE (1 << 2)
+# define I965_VS0_CS_DONE (1 << 1)
+# define I965_VF_CS_DONE (1 << 0)
+
+# define G4X_BCS_DONE (1UL << 31)
+# define G4X_CS_DONE (1UL << 30)
+# define G4X_MASF_DONE (1 << 29)
+# define G4X_SVDW_DONE (1 << 28)
+# define G4X_SVDR_DONE (1 << 27)
+# define G4X_SVRW_DONE (1 << 26)
+# define G4X_SVRR_DONE (1 << 25)
+# define G4X_ISC_DONE (1 << 24)
+# define G4X_MT_DONE (1 << 23)
+# define G4X_RC_DONE (1 << 22)
+# define G4X_DAP_DONE (1 << 21)
+# define G4X_MAWB_DONE (1 << 20)
+# define G4X_MT_IDLE (1 << 19)
+# define G4X_GBLT_BUSY (1 << 18)
+# define G4X_SVSM_DONE (1 << 17)
+# define G4X_MASM_DONE (1 << 16)
+# define G4X_QC_DONE (1 << 15)
+# define G4X_FL_DONE (1 << 14)
+# define G4X_SC_DONE (1 << 13)
+# define G4X_DM_DONE (1 << 12)
+# define G4X_FT_DONE (1 << 11)
+# define G4X_DG_DONE (1 << 10)
+# define G4X_SI_DONE (1 << 9)
+# define G4X_SO_DONE (1 << 8)
+# define G4X_PL_DONE (1 << 7)
+# define G4X_WIZ_DONE (1 << 6)
+# define G4X_URB_DONE (1 << 5)
+# define G4X_SF_DONE (1 << 4)
+# define G4X_CL_DONE (1 << 3)
+# define G4X_GS_DONE (1 << 2)
+# define G4X_VS0_DONE (1 << 1)
+# define G4X_VF_DONE (1 << 0)
+
+#define GEN6_INSTDONE_2 0x207c
+# define GEN6_GAM_DONE (1UL << 31)
+# define GEN6_CS_DONE (1UL << 30)
+# define GEN6_WMBE_DONE (1 << 29)
+# define GEN6_SVRW_DONE (1 << 28)
+# define GEN6_RCC_DONE (1 << 27)
+# define GEN6_SVG_DONE (1 << 26)
+# define GEN6_ISC_DONE (1 << 25)
+# define GEN6_MT_DONE (1 << 24)
+# define GEN6_RCPFE_DONE (1 << 23)
+# define GEN6_RCPBE_DONE (1 << 22)
+# define GEN6_VDI_DONE (1 << 21)
+# define GEN6_RCZ_DONE (1 << 20)
+# define GEN6_DAP_DONE (1 << 19)
+# define GEN6_PSD_DONE (1 << 18)
+# define GEN6_IZ_DONE (1 << 17)
+# define GEN6_WMFE_DONE (1 << 16)
+# define GEN6_SVSM_DONE (1 << 15)
+# define GEN6_QC_DONE (1 << 14)
+# define GEN6_FL_DONE (1 << 13)
+# define GEN6_SC_DONE (1 << 12)
+# define GEN6_DM_DONE (1 << 11)
+# define GEN6_FT_DONE (1 << 10)
+# define GEN6_DG_DONE (1 << 9)
+# define GEN6_SI_DONE (1 << 8)
+# define GEN6_SO_DONE (1 << 7)
+# define GEN6_PL_DONE (1 << 6)
+# define GEN6_VME_DONE (1 << 5)
+# define GEN6_SF_DONE (1 << 4)
+# define GEN6_CL_DONE (1 << 3)
+# define GEN6_GS_DONE (1 << 2)
+# define GEN6_VS0_DONE (1 << 1)
+# define GEN6_VF_DONE (1 << 0)
+
+#define IA_VERTICES_COUNT_QW 0x2310
+#define IA_PRIMITIVES_COUNT_QW 0x2318
+#define VS_INVOCATION_COUNT_QW 0x2320
+#define GS_INVOCATION_COUNT_QW 0x2328
+#define GS_PRIMITIVES_COUNT_QW 0x2330
+#define CL_INVOCATION_COUNT_QW 0x2338
+#define CL_PRIMITIVES_COUNT_QW 0x2340
+#define PS_INVOCATION_COUNT_QW 0x2348
+#define PS_DEPTH_COUNT_QW 0x2350
+#define TIMESTAMP_QW 0x2358
+#define CLKCMP_QW 0x2360
+
+#define I915_GCFGC 0xf0
+
+
+/* Special gtt memory types */
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+
+/* New caching attributes for gen6/sandybridge */
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif /* _I915_REG_H_ */
diff --git a/usr/src/uts/intel/io/i915/i915_suspend.c b/usr/src/uts/intel/io/i915/i915_suspend.c
new file mode 100644
index 0000000..14ed727
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_suspend.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ *
+ * Copyright 2008, 2013, (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "intel_drv.h"
+#include "i915_reg.h"
+
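+/* Indexed VGA access: write the register index to the index port,
+ * then transfer the value through the matching data port.
+ */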
+static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE8(index_port, reg);
+ return I915_READ8(data_port);
+}
+
+static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
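+ /* reading ST01 resets the attribute controller index/data flip-flop */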
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+ return I915_READ8(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+ I915_WRITE8(VGA_AR_DATA_WRITE, val);
+}
+
+static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE8(index_port, reg);
+ I915_WRITE8(data_port, val);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* VGA state */
+ dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+ dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+ dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+ dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
+
+ /* VGA color palette registers */
+ dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
+
+ /* MSR bits */
+ dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* CRT controller regs */
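+ /* Clearing bit 7 of CR11 unlocks CR0-7 for writing (standard VGA) */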
+ i915_write_indexed(dev, cr_index, cr_data, 0x11,
+ i915_read_indexed(dev, cr_index, cr_data, 0x11) &
+ (~0x80));
+ for (i = 0; i <= 0x24; i++)
+ dev_priv->regfile.saveCR[i] =
+ i915_read_indexed(dev, cr_index, cr_data, i);
+ /* Make sure we don't turn off CR group 0 writes */
+ dev_priv->regfile.saveCR[0x11] &= ~0x80;
+
+ /* Attribute controller registers */
+ I915_READ8(st01);
+ dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+ for (i = 0; i <= 0x14; i++)
+ dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
+ I915_READ8(st01);
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
+ I915_READ8(st01);
+
+ /* Graphics controller registers */
+ for (i = 0; i < 9; i++)
+ dev_priv->regfile.saveGR[i] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
+
+ dev_priv->regfile.saveGR[0x10] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+ dev_priv->regfile.saveGR[0x11] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+ dev_priv->regfile.saveGR[0x18] =
+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+ /* Sequencer registers */
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveSR[i] =
+ i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* VGA state */
+ I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
+
+ I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+ I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+ I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+ POSTING_READ(VGA_PD);
+ udelay(150);
+
+ /* MSR bits */
+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+ if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* Sequencer registers, don't write SR07 */
+ for (i = 0; i < 7; i++)
+ i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
+ dev_priv->regfile.saveSR[i]);
+
+ /* CRT controller regs */
+ /* Enable CR group 0 writes */
+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
+ for (i = 0; i <= 0x24; i++)
+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
+
+ /* Graphics controller regs */
+ for (i = 0; i < 9; i++)
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
+ dev_priv->regfile.saveGR[i]);
+
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+ dev_priv->regfile.saveGR[0x10]);
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+ dev_priv->regfile.saveGR[0x11]);
+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+ dev_priv->regfile.saveGR[0x18]);
+
+ /* Attribute controller registers */
+ I915_READ8(st01); /* switch back to index mode */
+ for (i = 0; i <= 0x14; i++)
+ i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
+ I915_READ8(st01); /* switch back to index mode */
+ I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
+ I915_READ8(st01);
+
+ /* VGA color palette registers */
+ I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
+}
+
+static void i915_save_display(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ /* Display arbitration control */
+ if (INTEL_INFO(dev)->gen <= 4)
+ dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+
+ /* This is only meaningful in non-KMS mode */
+ /* Don't save them in KMS mode */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_display_reg(dev);
+
+ spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
+ /* LVDS state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+ } else {
+ dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->regfile.saveLVDS = I915_READ(LVDS);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ } else {
+ dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
+
+ /* Only save FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
+ dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_save_vga(dev);
+}
+
+static void i915_restore_display(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 mask = 0xffffffff;
+ unsigned long flags;
+
+ /* Display arbitration */
+ if (INTEL_INFO(dev)->gen <= 4)
+ I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_display_reg(dev);
+
+ spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
+ /* LVDS state */
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ mask = ~LVDS_PORT_EN;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
+ else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+ * otherwise we get blank eDP screen after S3 on some machines
+ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ I915_WRITE(RSTDBYCTL,
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+
+ /* Only restore FBC state on platforms that support FBC */
+ intel_disable_fbc(dev);
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
+ } else {
+ I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
+ }
+ }
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_restore_vga(dev);
+ else
+ i915_redisable_vga(dev);
+}
+
+int i915_save_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_save_display(dev);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+ dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+ dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+ dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+ dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+ dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+ I915_READ(RSTDBYCTL);
+ dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+ } else {
+ dev_priv->regfile.saveIER = I915_READ(IER);
+ dev_priv->regfile.saveIMR = I915_READ(IMR);
+ }
+ }
+
+ intel_disable_gt_powersave(dev);
+
+ /* Cache mode state */
+ dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+ /* Memory Arbitration state */
+ dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+ dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+ dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int i915_restore_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_gem_restore_fences(dev);
+ i915_restore_display(dev);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* Interrupt state */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+ } else {
+ I915_WRITE(IER, dev_priv->regfile.saveIER);
+ I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+ }
+ }
+
+ /* Cache mode state */
+ I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+
+ /* Memory arbitration state */
+ I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
+
+ for (i = 0; i < 16; i++) {
+ I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ intel_i2c_reset(dev);
+
+ return 0;
+}
diff --git a/usr/src/uts/intel/io/i915/i915_ums.c b/usr/src/uts/intel/io/i915/i915_ums.c
new file mode 100644
index 0000000..c3235d0
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/i915_ums.c
@@ -0,0 +1,503 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ * Copyright 2013 (c) Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "intel_drv.h"
+#include "i915_reg.h"
+
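+/* A pipe is treated as enabled when the DPLL driving it has its VCO running */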
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
+
+ /* On IVB, 3rd pipe shares PLL with another one */
+ if (pipe > 1)
+ return false;
+
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = PCH_DPLL(pipe);
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (HAS_PCH_SPLIT(dev))
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->regfile.save_palette_a;
+ else
+ array = dev_priv->regfile.save_palette_b;
+
+ for (i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+void i915_save_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Cursor state */
+ dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
+ } else {
+ dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+ dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+ dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
+ dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
+ dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
+ dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
+ dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
+ dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
+ }
+
+ dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+ dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
+
+ /* Pipe & plane B info */
+ dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
+ } else {
+ dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+ dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+ dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
+ }
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
+ dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
+ dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
+ dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
+ dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
+ dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
+ dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
+ }
+
+ dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+ dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+
+ /* Fences */
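+	/* Gen4+ exposes sixteen 64-bit fence registers; gen2/3 use 32-bit
+	 * fences (eight, plus eight more on 945/G33-class parts). */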
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
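+		/* FALLTHROUGH: gen 3 also saves the eight 830-style fences below */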
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+ else
+ dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+ dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+ dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
+ dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
+ dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
+ dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
+ dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
+ dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
+ dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
+ }
+	/* FIXME: save TV & SDVO state */
+
+ return;
+}
+
+void i915_restore_display_reg(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
+ }
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
+ break;
+ }
+
+
+ if (HAS_PCH_SPLIT(dev)) {
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
+ } else {
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
+ }
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
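+	/* (if the VCO was enabled, write the DPLL with it disabled first,
+	 *  program the FP dividers, then re-enable and wait ~150us for the
+	 *  clock to stabilize) */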
+ if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
+ POSTING_READ(dpll_a_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
+
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
+
+ I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
+
+ I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+ I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+ I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+ I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+ I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+ I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+ I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
+ }
+
+ I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
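+	/* writing the base address back to itself latches the plane update */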
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
+
+ /* Pipe & plane B info */
+ if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ }
+ I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
+ POSTING_READ(dpll_b_reg);
+ udelay(150);
+ if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
+ }
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
+
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
+
+ I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
+
+ I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+ I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+ I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+ I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+ I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+ I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+ I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+ }
+
+ /* Restore plane info */
+ I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
+ }
+
+ I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
+
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
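+	/* writing the base address back to itself latches the plane update */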
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
+
+ /* Cursor state */
+ I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+ /* CRT state */
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
+
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+ I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+ I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+ }
+ /* FIXME: restore TV & SDVO state */
+
+ return;
+}
diff --git a/usr/src/uts/intel/io/i915/intel_bios.c b/usr/src/uts/intel/io/i915/intel_bios.c
new file mode 100644
index 0000000..717f5bf
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_bios.c
@@ -0,0 +1,759 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_dp_helper.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_bios.h"
+
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
+
+static int panel_type;
+
+static void *
+find_section(struct bdb_header *bdb, int section_id)
+{
+ u8 *base = (u8 *)bdb;
+ int index = 0;
+ u16 total, current_size;
+ u8 current_id;
+
+ /* skip to first section */
+ index += bdb->header_size;
+ total = bdb->bdb_size;
+
+ /* walk the sections looking for section_id */
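+	/* each block is a 1-byte id followed by a 2-byte size, then the payload */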
+ while (index < total) {
+ current_id = *(base + index);
+ index++;
+ current_size = *((u16 *)(uintptr_t)(base + index));
+ index += 2;
+ if (current_id == section_id)
+ return base + index;
+ index += current_size;
+ }
+
+ return NULL;
+}
+
+static u16
+get_blocksize(void *p)
+{
+ u16 *block_ptr, block_size;
+
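+	/* the 2-byte block size immediately precedes the payload (see find_section) */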
+ block_ptr = (u16 *)(uintptr_t)((char *)p - 2);
+ block_size = *block_ptr;
+ return block_size;
+}
+
+static void
+fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+ const struct lvds_dvo_timing *dvo_timing)
+{
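+	/* DTD-style fields are split into a low byte plus a high nibble; reassemble them */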
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ dvo_timing->hsync_pulse_width;
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ dvo_timing->vsync_off;
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ dvo_timing->vsync_pulse_width;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ /* Some VBTs have bogus h/vtotal values */
+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+ drm_mode_set_name(panel_fixed_mode);
+}
+
+static bool
+lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
+ const struct lvds_dvo_timing *b)
+{
+ if (a->hactive_hi != b->hactive_hi ||
+ a->hactive_lo != b->hactive_lo)
+ return false;
+
+ if (a->hsync_off_hi != b->hsync_off_hi ||
+ a->hsync_off_lo != b->hsync_off_lo)
+ return false;
+
+ if (a->hsync_pulse_width != b->hsync_pulse_width)
+ return false;
+
+ if (a->hblank_hi != b->hblank_hi ||
+ a->hblank_lo != b->hblank_lo)
+ return false;
+
+ if (a->vactive_hi != b->vactive_hi ||
+ a->vactive_lo != b->vactive_lo)
+ return false;
+
+ if (a->vsync_off != b->vsync_off)
+ return false;
+
+ if (a->vsync_pulse_width != b->vsync_pulse_width)
+ return false;
+
+ if (a->vblank_hi != b->vblank_hi ||
+ a->vblank_lo != b->vblank_lo)
+ return false;
+
+ return true;
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+ int index)
+{
+	/*
+	 * The size of fp_timing varies across platforms, so compute the
+	 * relative offset of the DVO timing within an LVDS data entry
+	 * to locate the DVO timing entry.
+	 */
+
+ int lfp_data_size =
+ lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ int dvo_timing_offset =
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+ char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+ return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
+/* Get the lvds_fp_timing entry.
+ * This function may return NULL if the corresponding entry is invalid.
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+ const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ /* LINTED */
+ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+ /* LINTED */
+ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+ size_t ofs;
+
+ if (index >= ARRAY_SIZE(ptrs->ptr))
+ return NULL;
+ ofs = ptrs->ptr[index].fp_timing_offset;
+ if (ofs < data_ofs ||
+ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+ return NULL;
+ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
+/* Try to find integrated panel data */
+static void
+parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ const struct bdb_lvds_options *lvds_options;
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int i, downclock;
+
+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+
+ dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
+ if (lvds_options->panel_type == 0xff)
+ return;
+
+ panel_type = lvds_options->panel_type;
+
+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!lvds_lfp_data)
+ return;
+
+ lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+ if (!lvds_lfp_data_ptrs)
+ return;
+
+ dev_priv->vbt.lvds_vbt = 1;
+
+ panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+
+ dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+
+	/*
+	 * Enumerate the LVDS panel timing entries in the VBT to see
+	 * whether a lower-clock (downclock) variant of this mode exists.
+	 */
+ downclock = panel_dvo_timing->clock;
+ for (i = 0; i < 16; i++) {
+ const struct lvds_dvo_timing *dvo_timing;
+
+ dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ i);
+ if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
+ dvo_timing->clock < downclock)
+ downclock = dvo_timing->clock;
+ }
+
+ if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = downclock * 10;
+ DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ panel_fixed_mode->clock, 10*downclock);
+ }
+
+ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+ if (fp_timing) {
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+ dev_priv->vbt.bios_lvds_val);
+ }
+ }
+}
+
+/* Try to find sdvo panel data */
+static void
+parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int index;
+
+ index = i915_vbt_sdvo_panel_type;
+ if (index == -2) {
+ DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ return;
+ }
+
+ if (index == -1) {
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ index = sdvo_lvds_options->panel_type;
+ }
+
+ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dvo_timing)
+ return;
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
+
+ dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+ DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+ bool alternate)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ return alternate ? 66 : 48;
+ case 3:
+ case 4:
+ return alternate ? 100 : 96;
+ default:
+ return alternate ? 100 : 120;
+ }
+}
+
+static void
+parse_general_features(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct bdb_general_features *general;
+
+ general = find_section(bdb, BDB_GENERAL_FEATURES);
+ if (general) {
+ dev_priv->vbt.int_tv_support = general->int_tv_support;
+ dev_priv->vbt.int_crt_support = general->int_crt_support;
+ dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
+ dev_priv->vbt.lvds_ssc_freq =
+ intel_bios_ssc_frequency(dev, general->ssc_freq);
+ dev_priv->vbt.display_clock_mode = general->display_clock_mode;
+ dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support"
+ " %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode"
+ " %d fdi_rx_polarity_inverted %d\n",
+ dev_priv->vbt.int_tv_support,
+ dev_priv->vbt.int_crt_support,
+ dev_priv->vbt.lvds_use_ssc,
+ dev_priv->vbt.lvds_ssc_freq,
+ dev_priv->vbt.display_clock_mode,
+ dev_priv->vbt.fdi_rx_polarity_inverted);
+ }
+}
+
+static void
+parse_general_definitions(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *general;
+
+ general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (general) {
+ u16 block_size = get_blocksize(general);
+ if (block_size >= sizeof(*general)) {
+ int bus_pin = general->crt_ddc_gmbus_pin;
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ if (intel_gmbus_is_port_valid(bus_pin))
+ dev_priv->vbt.crt_ddc_pin = bus_pin;
+ } else {
+ DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
+ block_size);
+ }
+ }
+}
+
+static void
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ return;
+ }
+	/* Check whether the size of the child device entries meets our
+	 * expectations. If the size recorded in the general definitions
+	 * block differs from sizeof(struct child_device_config), skip
+	 * parsing the SDVO device info.
+	 */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* different child dev size; ignore it */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(((struct __bdb_general_definitions *)p_defs)->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ if (p_child->slave_addr != SLAVE_ADDR1 &&
+ p_child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+			 * it is not an SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ p_child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ p_child->slave_addr,
+ (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ if (!p_mapping->initialized) {
+ p_mapping->dvo_port = p_child->dvo_port;
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin);
+ } else {
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
+ }
+ if (p_child->slave2_addr) {
+			/* Maybe this is an SDVO device with multiple inputs,
+			 * and its mapping info was not added.
+			 */
+ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
+ }
+ count++;
+ }
+
+ if (!count) {
+ /* No SDVO device info is found */
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ }
+ return;
+}
+
+static void
+parse_driver_features(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct bdb_driver_features *driver;
+
+ driver = find_section(bdb, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ if (SUPPORTS_EDP(dev) &&
+ driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ dev_priv->vbt.edp_support = 1;
+
+ if (driver->dual_frequency)
+ dev_priv->render_reclock_avail = true;
+}
+
+static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+ struct edp_power_seq *edp_pps;
+ struct edp_link_params *edp_link_params;
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ return;
+ }
+
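+	/* color_depth packs a 2-bit depth code per panel; select this panel's field */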
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->vbt.edp_bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->vbt.edp_bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->vbt.edp_bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->link_params[panel_type];
+
+ dev_priv->vbt.edp_pps = *edp_pps;
+
+ dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+ DP_LINK_BW_1_62;
+ switch (edp_link_params->lanes) {
+ case 0:
+ dev_priv->vbt.edp_lanes = 1;
+ break;
+ case 1:
+ dev_priv->vbt.edp_lanes = 2;
+ break;
+ case 3:
+ default:
+ dev_priv->vbt.edp_lanes = 4;
+ break;
+ }
+ switch (edp_link_params->preemphasis) {
+ case 0:
+ dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+ break;
+ case 1:
+ dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+ break;
+ case 2:
+ dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+ break;
+ case 3:
+ dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+ break;
+ }
+ switch (edp_link_params->vswing) {
+ case 0:
+ dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
+ break;
+ case 1:
+ dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
+ break;
+ case 2:
+ dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
+ break;
+ case 3:
+ dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+ break;
+ }
+}
+
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ return;
+ }
+	/* Check whether the size of the child device entries meets our
+	 * expectations. If the size recorded in the general definitions
+	 * block differs from sizeof(struct child_device_config), skip
+	 * parsing the child device info.
+	 */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* different child dev size; ignore it */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child device that is present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(((struct __bdb_general_definitions *)p_defs)->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ return;
+ }
+ dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ if (!dev_priv->vbt.child_dev) {
+ DRM_DEBUG_KMS("No memory space for child device\n");
+ return;
+ }
+
+ dev_priv->vbt.child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(((struct __bdb_general_definitions *)p_defs)->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->vbt.child_dev + count;
+ count++;
+ (void) memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+ /* LFP panel data */
+ dev_priv->vbt.lvds_dither = 1;
+ dev_priv->vbt.lvds_vbt = 0;
+
+ /* SDVO panel data */
+ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+
+ /* general features */
+ dev_priv->vbt.int_tv_support = 1;
+ dev_priv->vbt.int_crt_support = 1;
+
+ /* Default to using SSC */
+ dev_priv->vbt.lvds_use_ssc = 1;
+ dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+}
+
+/**
+ * intel_parse_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+ * to appropriate values.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+int
+intel_parse_bios(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
+
+ if (HAS_PCH_NOP(dev))
+ return -ENODEV;
+
+ init_vbt_defaults(dev_priv);
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+			DRM_DEBUG_KMS("Using VBT from OpRegion: %.20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)(uintptr_t)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
+ }
+
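+	/* No (valid) VBT in the OpRegion, so scan the PCI option ROM instead. */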
+ if (bdb == NULL) {
+ struct vbt_header *vbt = NULL;
+ size_t size;
+ int i;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ DRM_ERROR("VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(uintptr_t)(bios + i + vbt->bdb_offset);
+ }
+
+ /* Grab useful general definitions */
+ parse_general_features(dev_priv, bdb);
+ parse_general_definitions(dev_priv, bdb);
+ parse_lfp_panel_data(dev_priv, bdb);
+ parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
+ parse_driver_features(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
+
+ if (bios)
+ pci_unmap_rom(pdev, bios);
+
+ return 0;
+}
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Set the Panel Power On/Off timings if uninitialized. */
+ if (!HAS_PCH_SPLIT(dev) &&
+ I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
+ /* Set T2 to 40ms and T5 to 200ms */
+ I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+ /* Set T3 to 35ms and Tx to 200ms */
+ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+ }
+}
diff --git a/usr/src/uts/intel/io/i915/intel_bios.h b/usr/src/uts/intel/io/i915/intel_bios.h
new file mode 100644
index 0000000..8be5cb0
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_bios.h
@@ -0,0 +1,699 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include "drmP.h"
+
+#pragma pack(1)
+struct vbt_header {
+	u8 signature[20];		/**< Always starts with '$VBT' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 vbt_size; /**< in bytes */
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset; /**< from beginning of VBT */
+ u32 aim_offset[4]; /**< from beginning of VBT */
+} __attribute__((packed));
+#pragma pack()
+
+struct bdb_header {
+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 bdb_size; /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+#pragma pack(1)
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+#pragma pack()
+
+/*
+ * There are several types of BIOS data blocks (BDBs); each block has
+ * an ID and a size in its first 3 bytes (ID in the first byte, size in
+ * the next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
+
+#pragma pack(1)
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd7:1;
+ u8 display_clock_mode:1;
+ u8 rsvd8:1; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
+} __attribute__((packed));
+#pragma pack()
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+#pragma pack(1)
+struct child_device_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+ u8 dvo_port; /* See Device_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it could be as
+	 * few as 4 if both TV and LVDS are missing. The device count
+	 * depends on the size of the general definitions block and is
+	 * obtained with the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * sizeof(child_device_config);
+ */
+ /* OSOL_i915 struct child_device_config devices[0]; */
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
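+/* OSOL_i915: fixed-capacity twin of bdb_general_definitions; the parsers
+ * cast to this type to index devices[], since the original zero-length
+ * array was removed above. */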
+struct __bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it could be as
+	 * few as 4 if both TV and LVDS are missing. The device count
+	 * depends on the size of the general definitions block and is
+	 * obtained with the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * sizeof(child_device_config);
+ */
+ struct child_device_config devices[6];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 h_image;
+ u8 v_image;
+ u8 max_hv;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __attribute__((packed));
+#pragma pack()
+
+
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_EDP 3
+
+#pragma pack(1)
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+} __attribute__((packed));
+#pragma pack()
+
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+#pragma pack(1)
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __attribute__ ((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ struct edp_link_params link_params[16];
+ u32 sdrrs_msa_timing_delay;
+
+ /* ith bit indicates enabled/disabled for (i+1)th panel */
+ u16 edp_s3d_feature;
+ u16 edp_t3_optimization;
+} __attribute__ ((packed));
+#pragma pack()
+
+void intel_setup_bios(struct drm_device *dev);
+int intel_parse_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
+
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
+#endif /* _I830_BIOS_H_ */
diff --git a/usr/src/uts/intel/io/i915/intel_crt.c b/usr/src/uts/intel/io/i915/intel_crt.c
new file mode 100644
index 0000000..4b49d2f
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_crt.c
@@ -0,0 +1,825 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "drm_sun_i2c.h" /* OSOL_i915 */
+
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+ ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+ ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+ ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+ ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+ struct intel_encoder base;
+ /* DPMS state is stored in the connector, which we need in the
+ * encoder's enable/disable callbacks */
+ struct intel_connector *connector;
+ bool force_hotplug_required;
+ u32 adpa_reg;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_crt, base);
+}
+
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_crt, base);
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp;
+
+ tmp = I915_READ(crt->adpa_reg);
+
+ if (!(tmp & ADPA_DAC_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_crt_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp, flags = 0;
+
+ tmp = I915_READ(crt->adpa_reg);
+
+ if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ pipe_config->adjusted_mode.flags |= flags;
+}
+
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
+
+ temp = I915_READ(crt->adpa_reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp &= ~ADPA_DAC_ENABLE;
+
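+	/* standby gates hsync, suspend gates vsync, off gates both and
+	 * leaves the DAC disabled */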
+	switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+ intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+
+ intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+}
+
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ int old_dpms;
+
+ /* PCH platforms and VLV only support on/off. */
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = encoder->base.crtc;
+ if (!crtc) {
+ encoder->connectors_active = false;
+ return;
+ }
+
+ /* We need the pipe to run for anything but OFF. */
+ if (mode == DRM_MODE_DPMS_OFF)
+ encoder->connectors_active = false;
+ else
+ encoder->connectors_active = true;
+
+ /* We call connector dpms manually below in case pipe dpms doesn't
+ * change due to cloning. */
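+	/* DPMS modes are ordered ON(0) < STANDBY(1) < SUSPEND(2) < OFF(3),
+	 * so a numerically smaller mode means powering up. */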
+ if (mode < old_dpms) {
+ /* From off to on, enable the pipe first. */
+ intel_crtc_update_dpms(crtc);
+
+ intel_crt_set_dpms(encoder, mode);
+ } else {
+ intel_crt_set_dpms(encoder, mode);
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
+}
+
+static int intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+	int max_clock = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
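+	/* Max DAC clock: 350 MHz on gen2, 400 MHz on later parts
+	 * (mode->clock is in kHz). */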
+ if (IS_GEN2(dev))
+ max_clock = 350000;
+ else
+ max_clock = 400000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev) &&
+ (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+
+ if (HAS_PCH_SPLIT(dev))
+ pipe_config->has_pch_encoder = true;
+
+ /* LPT FDI RX only supports 8bpc. */
+ if (HAS_PCH_LPT(dev))
+ pipe_config->pipe_bpp = 24;
+
+ return true;
+}
+
+static void intel_crt_mode_set(struct drm_encoder *encoder,
+ /* LINTED */
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crt *crt =
+ intel_encoder_to_crt(to_intel_encoder(encoder));
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 adpa;
+
+ if (HAS_PCH_SPLIT(dev))
+ adpa = ADPA_HOTPLUG_BITS;
+ else
+ adpa = 0;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+	/* For CPT, allow a 3-pipe config; for others just use pipe A or B */
+ /* LINTED */
+ if (HAS_PCH_LPT(dev))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+ else if (intel_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
+ I915_WRITE(crt->adpa_reg, adpa);
+}
+
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 adpa;
+ bool ret;
+
+ /* The first time through, trigger an explicit detection cycle */
+ if (crt->force_hotplug_required) {
+ bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ u32 save_adpa;
+
+ crt->force_hotplug_required = 0;
+
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ I915_WRITE(crt->adpa_reg, adpa);
+
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+ if (turn_off_dac) {
+ I915_WRITE(crt->adpa_reg, save_adpa);
+ POSTING_READ(crt->adpa_reg);
+ }
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(crt->adpa_reg);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+ DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ return ret;
+}
+
+static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 adpa;
+ bool ret;
+ u32 save_adpa;
+
+ save_adpa = adpa = I915_READ(crt->adpa_reg);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+
+ I915_WRITE(crt->adpa_reg, adpa);
+
+ if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000)) {
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ I915_WRITE(crt->adpa_reg, save_adpa);
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(crt->adpa_reg);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+
+ DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ /* FIXME: debug force function and remove */
+ ret = true;
+
+ return ret;
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Not for i915G/i915GM
+ *
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
+ */
+static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 hotplug_en, orig, stat;
+ bool ret = false;
+ int i, tries = 0;
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_ironlake_crt_detect_hotplug(connector);
+
+ if (IS_VALLEYVIEW(dev))
+ return valleyview_crt_detect_hotplug(connector);
+
+ /*
+	 * On 4-series desktop parts, the CRT detect sequence needs to be
+	 * done twice to get a reliable result.
+ */
+
+ if (IS_G4X(dev) && !IS_GM45(dev))
+ tries = 2;
+ else
+ tries = 1;
+ hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+	for (i = 0; i < tries; i++) {
+ /* turn on the FORCE_DETECT */
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ /* wait for FORCE_DETECT to go off */
+ if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT) == 0,
+ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+ }
+
+ stat = I915_READ(PORT_HOTPLUG_STAT);
+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* clear the interrupt we just generated, if any */
+ I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ /* and put the bits back */
+ I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+ return ret;
+}
+
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *i2c)
+{
+ struct edid *edid;
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
+
+ return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ int ret;
+
+ edid = intel_crt_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid, (EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1)));
+ return ret;
+}
+
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
+{
+ struct intel_crt *crt = intel_attached_crt(connector);
+ struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct edid *edid;
+ struct i2c_adapter *i2c;
+
+ BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ edid = intel_crt_get_edid(connector, i2c);
+
+ if (edid) {
+ bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ /*
+ * This may be a DVI-I connector with a shared DDC
+ * link between analog and digital outputs, so we
+ * have to check the EDID input spec of the attached device.
+ */
+ if (!is_digital) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ return true;
+ }
+
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ }
+
+ return false;
+}
+
+static enum drm_connector_status
+intel_crt_load_detect(struct intel_crt *crt)
+{
+ struct drm_device *dev = crt->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
+ uint32_t save_bclrpat;
+ uint32_t save_vtotal;
+ uint32_t vtotal, vactive;
+ uint32_t vsample;
+ uint32_t vblank, vblank_start, vblank_end;
+ uint32_t dsl;
+ uint32_t bclrpat_reg;
+ uint32_t vtotal_reg;
+ uint32_t vblank_reg;
+ uint32_t vsync_reg;
+ uint32_t pipeconf_reg;
+ uint32_t pipe_dsl_reg;
+ uint8_t st00;
+ enum drm_connector_status status;
+
+ DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
+ bclrpat_reg = BCLRPAT(pipe);
+ vtotal_reg = VTOTAL(pipe);
+ vblank_reg = VBLANK(pipe);
+ vsync_reg = VSYNC(pipe);
+ pipeconf_reg = PIPECONF(pipe);
+ pipe_dsl_reg = PIPEDSL(pipe);
+
+ save_bclrpat = I915_READ(bclrpat_reg);
+ save_vtotal = I915_READ(vtotal_reg);
+ vblank = I915_READ(vblank_reg);
+
+ vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
+ vactive = (save_vtotal & 0x7ff) + 1;
+
+ vblank_start = (vblank & 0xfff) + 1;
+ vblank_end = ((vblank >> 16) & 0xfff) + 1;
+
+ /* Set the border color to purple. */
+ I915_WRITE(bclrpat_reg, 0x500050);
+
+ if (!IS_GEN2(dev)) {
+ uint32_t pipeconf = I915_READ(pipeconf_reg);
+ I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+ POSTING_READ(pipeconf_reg);
+		/* Wait for the next vblank to substitute the
+		 * border color for the color info */
+ intel_wait_for_vblank(dev, pipe);
+ st00 = I915_READ8(VGA_MSR_WRITE);
+ status = ((st00 & (1 << 4)) != 0) ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ I915_WRITE(pipeconf_reg, pipeconf);
+ } else {
+ bool restore_vblank = false;
+ int count, detect;
+
+ /*
+ * If there isn't any border, add some.
+ * Yes, this will flicker
+ */
+ if (vblank_start <= vactive && vblank_end >= vtotal) {
+ uint32_t vsync = I915_READ(vsync_reg);
+ uint32_t vsync_start = (vsync & 0xffff) + 1;
+
+ vblank_start = vsync_start;
+ I915_WRITE(vblank_reg,
+ (vblank_start - 1) |
+ ((vblank_end - 1) << 16));
+ restore_vblank = true;
+ }
+ /* sample in the vertical border, selecting the larger one */
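+		/* (i.e. the midpoint of the taller of the two borders) */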
+ if (vblank_start - vactive >= vtotal - vblank_end)
+ vsample = (vblank_start + vactive) >> 1;
+ else
+ vsample = (vtotal + vblank_end) >> 1;
+
+ /*
+ * Wait for the border to be displayed
+ */
+ while (I915_READ(pipe_dsl_reg) >= vactive)
+ ;
+ while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
+ ;
+ /*
+ * Watch ST00 for an entire scanline
+ */
+ detect = 0;
+ count = 0;
+ do {
+ count++;
+ /* Read the ST00 VGA status register */
+ st00 = I915_READ8(VGA_MSR_WRITE);
+ if (st00 & (1 << 4))
+ detect++;
+ } while ((I915_READ(pipe_dsl_reg) == dsl));
+
+ /* restore vblank if necessary */
+ if (restore_vblank)
+ I915_WRITE(vblank_reg, vblank);
+ /*
+ * If more than 3/4 of the scanline detected a monitor,
+ * then it is assumed to be present. This works even on i830,
+ * where there isn't any way to force the border color across
+ * the screen
+ */
+ status = detect * 4 > count * 3 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+
+ /* Restore previous settings */
+ I915_WRITE(bclrpat_reg, save_bclrpat);
+
+ return status;
+}
+
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
+ enum drm_connector_status status;
+ struct intel_load_detect_pipe tmp;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+		/* We cannot rely on the HPD pin always being correctly wired
+		 * up; for example, many KVM switches do not pass it through.
+		 * So only trust a positive assertion that the monitor is
+		 * connected.
+		 */
+ if (intel_crt_detect_hotplug(connector)) {
+ DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ return connector_status_connected;
+ } else
+ DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+ }
+
+ if (intel_crt_detect_ddc(connector))
+ return connector_status_connected;
+
+	/* Load detection is broken on HPD-capable machines. Whoever wants a
+	 * broken monitor (without EDID) to work behind a broken KVM (one that
+	 * lacks the right resistors for hotplug detection) needs to fix this
+	 * up. For now just bail out. */
+ if (I915_HAS_HOTPLUG(dev))
+ return connector_status_disconnected;
+
+ if (!force)
+ return connector->status;
+
+ /* for pre-945g platforms use load detect */
+ if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
+ if (intel_crt_detect_ddc(connector))
+ status = connector_status_connected;
+ else
+ status = intel_crt_load_detect(crt);
+ intel_release_load_detect_pipe(connector, &tmp);
+ } else
+ status = connector_status_unknown;
+
+ return status;
+}
+
+static void intel_crt_destroy(struct drm_connector *connector)
+{
+ /* OSOL_i915: drm_sysfs_connector_remove(connector); */
+ drm_connector_cleanup(connector);
+ kfree(connector, sizeof (struct intel_connector));
+}
+
+static int intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ struct i2c_adapter *i2c;
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
+ if (ret || !IS_G4X(dev))
+ return ret;
+
+ /* Try to probe digital port for output in DVI-I -> VGA mode. */
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ return intel_crt_ddc_get_modes(connector, i2c);
+}
+
+/* LINTED */
+static int intel_crt_set_property(struct drm_connector *connector,
+ /* LINTED */
+ struct drm_property *property,
+ /* LINTED */
+ uint64_t value)
+{
+ return 0;
+}
+
+static void intel_crt_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crt *crt = intel_attached_crt(connector);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(crt->adpa_reg);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(crt->adpa_reg, adpa);
+ POSTING_READ(crt->adpa_reg);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = 1;
+ }
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
+ .mode_set = intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .reset = intel_crt_reset,
+ .dpms = intel_crt_dpms,
+ .detect = intel_crt_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_crt_destroy,
+ .set_property = intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+ .mode_valid = intel_crt_mode_valid,
+ .get_modes = intel_crt_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+void intel_crt_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct intel_crt *crt;
+ struct intel_connector *intel_connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Skip machines without VGA that falsely report hotplug events */
+
+ crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+ if (!crt)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(crt, sizeof(struct intel_crt));
+ return;
+ }
+
+ connector = &intel_connector->base;
+ crt->connector = intel_connector;
+ (void) drm_connector_init(dev, &intel_connector->base,
+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ (void) drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+
+ intel_connector_attach_encoder(intel_connector, &crt->base);
+
+ crt->base.type = INTEL_OUTPUT_ANALOG;
+ crt->base.type_size = sizeof(struct intel_crt);
+ crt->base.cloneable = true;
+ if (IS_I830(dev))
+ crt->base.crtc_mask = (1 << 0);
+ else
+ crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ if (IS_GEN2(dev))
+ connector->interlace_allowed = 0;
+ else
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ if (HAS_PCH_SPLIT(dev))
+ crt->adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev))
+ crt->adpa_reg = VLV_ADPA;
+ else
+ crt->adpa_reg = ADPA;
+
+ crt->base.compute_config = intel_crt_compute_config;
+ crt->base.disable = intel_disable_crt;
+ crt->base.enable = intel_enable_crt;
+ crt->base.get_config = intel_crt_get_config;
+ if (I915_HAS_HOTPLUG(dev))
+ crt->base.hpd_pin = HPD_CRT;
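+	/* On DDI platforms (e.g. Haswell) the analog port is driven through
+	 * DDI E, so use the DDI hw state helper for it. */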
+ if (HAS_DDI(dev))
+ crt->base.get_hw_state = intel_ddi_get_hw_state;
+ else
+ crt->base.get_hw_state = intel_crt_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
+ (void) drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+ /* OSOL_i915: drm_sysfs_connector_add(connector); */
+
+ if (!I915_HAS_HOTPLUG(dev))
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ /*
+ * Configure the automatic hotplug detection stuff
+ */
+ crt->force_hotplug_required = 0;
+
+ /*
+	 * TODO: find a proper way to discover whether we need to set the
+ * polarity and link reversal bits or not, instead of relying on the
+ * BIOS.
+ */
+ if (HAS_PCH_LPT(dev)) {
+ u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+ FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+ dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
+ }
+}
diff --git a/usr/src/uts/intel/io/i915/intel_ddi.c b/usr/src/uts/intel/io/i915/intel_ddi.c
new file mode 100644
index 0000000..34baae5
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_ddi.c
@@ -0,0 +1,1388 @@
+/*
+ * Copyright © 2012-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well.
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0006000E, /* DP parameters */
+ 0x00D75FFF, 0x0005000A,
+ 0x00C30FFF, 0x00040006,
+ 0x80AAAFFF, 0x000B0000,
+ 0x00FFFFFF, 0x0005000A,
+ 0x00D75FFF, 0x000C0004,
+ 0x80C30FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006,
+ 0x80D75FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0007000E, /* FDI parameters */
+ 0x00D75FFF, 0x000F000A,
+ 0x00C30FFF, 0x00060006,
+ 0x00AAAFFF, 0x001E0000,
+ 0x00FFFFFF, 0x000F000A,
+ 0x00D75FFF, 0x00160004,
+ 0x00C30FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00060006,
+ 0x00D75FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+ return intel_dig_port->port;
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ return PORT_E;
+ } else {
+ DRM_ERROR("Invalid DDI encoder type %d\n", type);
+ BUG();
+ }
+ return I915_MAX_PORTS;
+}
+
+/* On Haswell, DDI port buffers must be programmed with correct values
+ * in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those.
+ */
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ bool use_fdi_mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ int i;
+ const u32 *ddi_translations = ((use_fdi_mode) ?
+ hsw_ddi_translations_fdi :
+ hsw_ddi_translations_dp);
+
+ DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
+ port_name(port),
+ use_fdi_mode ? "FDI" : "DP");
+
+ if (use_fdi_mode && (port != PORT_E))
+ DRM_ERROR("Programming port %c in FDI mode, this probably will not work.",
+ port_name(port));
+
+	for (i = 0, reg = DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ I915_WRITE(reg, ddi_translations[i]);
+ reg += 4;
+ }
+}
+
+/* Program DDI buffers translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+ int port;
+
+ if (!HAS_DDI(dev))
+ return;
+
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+	/* DDI E is the suggested port to work in FDI mode, so program it as
+	 * such by default. It will have to be reprogrammed if a digital DP
+	 * output is later detected on it
+ */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
+}
+
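+/* DDI buffer voltage swing / emphasis settings, in the order they are
+ * tried during FDI link training (each value is tried twice). */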
+static const long hsw_ddi_buf_ctl_values[] = {
+ DDI_BUF_EMP_400MV_0DB_HSW,
+ DDI_BUF_EMP_400MV_3_5DB_HSW,
+ DDI_BUF_EMP_400MV_6DB_HSW,
+ DDI_BUF_EMP_400MV_9_5DB_HSW,
+ DDI_BUF_EMP_600MV_0DB_HSW,
+ DDI_BUF_EMP_600MV_3_5DB_HSW,
+ DDI_BUF_EMP_600MV_6DB_HSW,
+ DDI_BUF_EMP_800MV_0DB_HSW,
+ DDI_BUF_EMP_800MV_3_5DB_HSW
+};
+
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ uint32_t reg = DDI_BUF_CTL(port);
+ int i;
+
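+	/* Poll for up to ~8us for the buffer idle bit before giving up. */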
+ for (i = 0; i < 8; i++) {
+ udelay(1);
+ if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ return;
+ }
+ DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP).
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 temp, i, rx_ctl_val;
+
+	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
+ * mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ *
+ * WaFDIAutoLinkSetTimingOverrride:hsw
+ */
+ I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+ FDI_RX_PWRDN_LANE0_VAL(2) |
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE |
+ FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+ udelay(220);
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+	/* Start the training, iterating through the available voltage and
+	 * emphasis settings and testing each value twice. */
+ for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal, the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+ * port reversal bit */
+ I915_WRITE(DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE |
+ ((intel_crtc->config.fdi_lanes - 1) << 1) |
+ hsw_ddi_buf_ctl_values[i / 2]);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ udelay(600);
+
+ /* Program PCH FDI Receiver TU */
+ I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+
+ /* Wait for FDI auto training time */
+ udelay(5);
+
+ temp = I915_READ(DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+
+ /* Enable normal pixel sending for FDI */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+
+ return;
+ }
+
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(PORT_E), temp);
+ POSTING_READ(DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = I915_READ(_FDI_RXA_MISC);
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
+ }
+
+ DRM_ERROR("FDI link training failed!\n");
+}
+
+static void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ int port = intel_ddi_get_encoder_port(intel_encoder);
+ int pipe = intel_crtc->pipe;
+ int type = intel_encoder->type;
+
+ DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
+ port_name(port), pipe_name(pipe));
+
+ intel_crtc->eld_vld = false;
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_dp_init_link_config(intel_dp);
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs a new logic
+ * and a new set of registers, so we leave it for future
+ * patch bombing.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+ pipe_name(intel_crtc->pipe));
+
+ /* write eld */
+ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ }
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder, *ret = NULL;
+ int num_encoders = 0;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ ret = intel_encoder;
+ num_encoders++;
+ }
+
+ if (num_encoders != 1) {
+ DRM_ERROR("%d encoders on crtc for pipe %c\n", num_encoders,
+ pipe_name(intel_crtc->pipe));
+ }
+
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t val;
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ plls->spll_refcount--;
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ plls->wrpll1_refcount--;
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+ val = I915_READ(WRPLL_CTL1);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL1);
+ }
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ plls->wrpll2_refcount--;
+ if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+ val = I915_READ(WRPLL_CTL2);
+ WARN_ON(!(val & WRPLL_PLL_ENABLE));
+ I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL2);
+ }
+ break;
+ }
+
+	if (plls->spll_refcount < 0)
+		DRM_ERROR("Invalid SPLL refcount\n");
+	if (plls->wrpll1_refcount < 0)
+		DRM_ERROR("Invalid WRPLL1 refcount\n");
+	if (plls->wrpll2_refcount < 0)
+		DRM_ERROR("Invalid WRPLL2 refcount\n");
+
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+#define LC_FREQ 2700
+#define LC_FREQ_2K (LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
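+/* Absolute difference that is safe for unsigned operands. */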
+#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a))
+
+struct wrpll_rnp {
+ unsigned p, n2, r2;
+};
+
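+/* Per-clock frequency error budget, in ppm; 0 demands an exact match. */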
+static unsigned wrpll_get_budget_for_freq(int clock)
+{
+ unsigned budget;
+
+ switch (clock) {
+ case 25175000:
+ case 25200000:
+ case 27000000:
+ case 27027000:
+ case 37762500:
+ case 37800000:
+ case 40500000:
+ case 40541000:
+ case 54000000:
+ case 54054000:
+ case 59341000:
+ case 59400000:
+ case 72000000:
+ case 74176000:
+ case 74250000:
+ case 81000000:
+ case 81081000:
+ case 89012000:
+ case 89100000:
+ case 108000000:
+ case 108108000:
+ case 111264000:
+ case 111375000:
+ case 148352000:
+ case 148500000:
+ case 162000000:
+ case 162162000:
+ case 222525000:
+ case 222750000:
+ case 296703000:
+ case 297000000:
+ budget = 0;
+ break;
+ case 233500000:
+ case 245250000:
+ case 247750000:
+ case 253250000:
+ case 298000000:
+ budget = 1500;
+ break;
+ case 169128000:
+ case 169500000:
+ case 179500000:
+ case 202000000:
+ budget = 2000;
+ break;
+ case 256250000:
+ case 262500000:
+ case 270000000:
+ case 272500000:
+ case 273750000:
+ case 280750000:
+ case 281250000:
+ case 286000000:
+ case 291750000:
+ budget = 4000;
+ break;
+ case 267250000:
+ case 268500000:
+ budget = 5000;
+ break;
+ default:
+ budget = 1000;
+ break;
+ }
+
+ return budget;
+}
+
+static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+ unsigned r2, unsigned n2, unsigned p,
+ struct wrpll_rnp *best)
+{
+ uint64_t a, b, c, d, diff, diff_best;
+
+ /* No best (r,n,p) yet */
+ if (best->p == 0) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ return;
+ }
+
+ /*
+ * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+ * freq2k.
+ *
+ * delta = 1e6 *
+ * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+ * freq2k;
+ *
+ * and we would like delta <= budget.
+ *
+ * If the discrepancy is above the PPM-based budget, always prefer to
+ * improve upon the previous solution. However, if you're within the
+ * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+ */
+ a = freq2k * budget * p * r2;
+ b = freq2k * budget * best->p * best->r2;
+ diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2));
+ diff_best = ABS_DIFF((freq2k * best->p * best->r2),
+ (LC_FREQ_2K * best->n2));
+ c = 1000000 * diff;
+ d = 1000000 * diff_best;
+
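+	/* a < c means the candidate exceeds its ppm budget, and b < d means
+	 * the current best does; this is the delta <= budget test from the
+	 * comment above, rearranged to avoid a division. */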
+ if (a < c && b < d) {
+ /* If both are above the budget, pick the closer */
+ if (best->p * best->r2 * diff < p * r2 * diff_best) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ } else if (a >= c && b < d) {
+		/* The candidate is within the budget but the current
+		 * best is not: switch to the candidate. */
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ } else if (a >= c && b >= d) {
+ /* Both are below the limit, so pick the higher n2/(r2*r2) */
+ if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ }
+ /* Otherwise a < c && b >= d, do nothing */
+}
+
+static void
+intel_ddi_calculate_wrpll(int clock /* in Hz */,
+ unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+ uint64_t freq2k;
+ unsigned p, n2, r2;
+ struct wrpll_rnp best = { 0, 0, 0 };
+ unsigned budget;
+
+ freq2k = clock / 100;
+
+ budget = wrpll_get_budget_for_freq(clock);
+
+	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
+	 * entirely and directly pass the LC PLL to it. */
+ if (freq2k == 5400000) {
+ *n2_out = 2;
+ *p_out = 1;
+ *r2_out = 2;
+ return;
+ }
+
+ /*
+ * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+ * the WR PLL.
+ *
+ * We want R so that REF_MIN <= Ref <= REF_MAX.
+ * Injecting R2 = 2 * R gives:
+ * REF_MAX * r2 > LC_FREQ * 2 and
+ * REF_MIN * r2 < LC_FREQ * 2
+ *
+ * Which means the desired boundaries for r2 are:
+ * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+ *
+ */
+ for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+ r2 <= LC_FREQ * 2 / REF_MIN;
+ r2++) {
+
+ /*
+ * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+ *
+ * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+ * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+ * VCO_MAX * r2 > n2 * LC_FREQ and
+	 * VCO_MIN * r2 < n2 * LC_FREQ
+ *
+ * Which means the desired boundaries for n2 are:
+ * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+ */
+ for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+ n2 <= VCO_MAX * r2 / LC_FREQ;
+ n2++) {
+
+ for (p = P_MIN; p <= P_MAX; p += P_INC)
+ wrpll_update_rnp(freq2k, budget,
+ r2, n2, p, &best);
+ }
+ }
+
+ *n2_out = best.n2;
+ *p_out = best.p;
+ *r2_out = best.r2;
+
+	DRM_DEBUG_KMS("WRPLL: %d Hz clock with p=%d, n2=%d r2=%d\n",
+ clock, *p_out, *n2_out, *r2_out);
+}
+
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ int type = intel_encoder->type;
+ enum pipe pipe = intel_crtc->pipe;
+ uint32_t reg, val;
+ int clock = intel_crtc->config.port_clock;
+
+ /* TODO: reuse PLLs when possible (compare values) */
+
+ intel_ddi_put_crtc_pll(crtc);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ default:
+ DRM_ERROR("Link bandwidth %d unsupported\n",
+ intel_dp->link_bw);
+ return false;
+ }
+
+ /* We don't need to turn any PLL on because we'll use LCPLL. */
+ return true;
+
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ unsigned p, n2, r2;
+
+ if (plls->wrpll1_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll1_refcount++;
+ reg = WRPLL_CTL1;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+ } else if (plls->wrpll2_refcount == 0) {
+ DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+ pipe_name(pipe));
+ plls->wrpll2_refcount++;
+ reg = WRPLL_CTL2;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+ } else {
+ DRM_ERROR("No WRPLLs available!\n");
+ return false;
+ }
+
+		if (I915_READ(reg) & WRPLL_PLL_ENABLE)
+ DRM_ERROR("WRPLL already enabled\n");
+
+ intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ if (plls->spll_refcount == 0) {
+ DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+ pipe_name(pipe));
+ plls->spll_refcount++;
+ reg = SPLL_CTL;
+ intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ } else {
+ DRM_ERROR("SPLL already in use\n");
+ return false;
+ }
+
+		if (I915_READ(reg) & SPLL_PLL_ENABLE)
+ DRM_ERROR("SPLL already enabled\n");
+
+ val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+ } else {
+ DRM_ERROR("Invalid DDI encoder type %d\n", type);
+ BUG_ON(1);
+ return false;
+ }
+
+ I915_WRITE(reg, val);
+ udelay(20);
+
+ return true;
+}
+
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
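+		/* The Main Stream Attributes (MSA) advertise the pipe's
+		 * color depth to the DP sink. */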
+ temp = TRANS_MSA_SYNC_CLK;
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ temp |= TRANS_MSA_6_BPC;
+ break;
+ case 24:
+ temp |= TRANS_MSA_8_BPC;
+ break;
+ case 30:
+ temp |= TRANS_MSA_10_BPC;
+ break;
+ case 36:
+ temp |= TRANS_MSA_12_BPC;
+ break;
+ default:
+ BUG_ON(1);
+ }
+ I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ }
+}
+
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t temp;
+
+ /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = TRANS_DDI_FUNC_ENABLE;
+ temp |= TRANS_DDI_SELECT_PORT(port);
+
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ temp |= TRANS_DDI_BPC_6;
+ break;
+ case 24:
+ temp |= TRANS_DDI_BPC_8;
+ break;
+ case 30:
+ temp |= TRANS_DDI_BPC_10;
+ break;
+ case 36:
+ temp |= TRANS_DDI_BPC_12;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DDI_PVSYNC;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DDI_PHSYNC;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ switch (pipe) {
+ case PIPE_A:
+ /* Can only use the always-on power well for eDP when
+ * not using the panel fitter, and when not using motion
+ * blur mitigation (which we don't support). */
+ if (intel_crtc->config.pch_pfit.size)
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ else
+ temp |= TRANS_DDI_EDP_INPUT_A_ON;
+ break;
+ case PIPE_B:
+ temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (intel_hdmi->has_hdmi_sink)
+ temp |= TRANS_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ } else if (type == INTEL_OUTPUT_ANALOG) {
+ temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ } else {
+ DRM_ERROR("Invalid encoder type %d for pipe %c\n",
+ intel_encoder->type, pipe_name(pipe));
+ BUG_ON(1);
+ }
+
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ uint32_t val = I915_READ(reg);
+
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val |= TRANS_DDI_PORT_NONE;
+ I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ int type = intel_connector->base.connector_type;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum pipe pipe = 0;
+ enum transcoder cpu_transcoder;
+ uint32_t tmp;
+
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+ return false;
+
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_EDP;
+ else
+ cpu_transcoder = (enum transcoder)pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ return true;
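+		/* FALLTHROUGH */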
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ return (type == DRM_MODE_CONNECTOR_DisplayPort);
+
+ case TRANS_DDI_MODE_SELECT_FDI:
+ return (type == DRM_MODE_CONNECTOR_VGA);
+
+ default:
+ return false;
+ }
+}
+
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ u32 tmp;
+ int i;
+
+ tmp = I915_READ(DDI_BUF_CTL(port));
+
+ if (!(tmp & DDI_BUF_CTL_ENABLE))
+ return false;
+
+ if (port == PORT_A) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ }
+
+ return true;
+ } else {
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK)
+ == TRANS_DDI_SELECT_PORT(port)) {
+ *pipe = i;
+ return true;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+
+ return false;
+}
+
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t temp, ret;
+ enum port port = I915_MAX_PORTS;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int i;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ port = PORT_A;
+ } else {
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp &= TRANS_DDI_PORT_MASK;
+
+ for (i = PORT_B; i <= PORT_E; i++)
+ if (temp == TRANS_DDI_SELECT_PORT(i))
+ port = i;
+ }
+
+ if (port == I915_MAX_PORTS) {
+ DRM_ERROR("Pipe %c enabled on an unknown port\n",
+ pipe_name(pipe));
+ ret = PORT_CLK_SEL_NONE;
+ } else {
+ ret = I915_READ(PORT_CLK_SEL(port));
+ DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
+ "0x%08x\n", pipe_name(pipe), port_name(port),
+ ret);
+ }
+
+ return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *intel_crtc;
+
+ for_each_pipe(pipe) {
+ intel_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ if (!intel_crtc->active)
+ continue;
+
+ intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+ pipe);
+
+ switch (intel_crtc->ddi_pll_sel) {
+ case PORT_CLK_SEL_SPLL:
+ dev_priv->ddi_plls.spll_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL1:
+ dev_priv->ddi_plls.wrpll1_refcount++;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ dev_priv->ddi_plls.wrpll2_refcount++;
+ break;
+ }
+ }
+}
+
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ }
+
+ WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ if (port != PORT_A)
+ intel_dp_stop_link_train(intel_dp);
+ }
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t val;
+ bool wait = false;
+
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+ }
+
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ int type = intel_encoder->type;
+ uint32_t tmp;
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(encoder);
+
+ /* In HDMI/DVI mode, the port width, and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE);
+ } else if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (port == PORT_A)
+ intel_dp_stop_link_train(intel_dp);
+
+ ironlake_edp_backlight_on(intel_dp);
+ }
+
+ if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ }
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int type = intel_encoder->type;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
+ (pipe * 4));
+ I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ }
+
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ ironlake_edp_backlight_off(intel_dp);
+ }
+}
+
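+/* Report the current CD clock (CDCLK) frequency in kHz. */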
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+ if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450000;
+ else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+ LCPLL_CLK_FREQ_450)
+ return 450000;
+ else if (IS_ULT(dev_priv->dev))
+ return 337500;
+ else
+ return 540000;
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ /* The LCPLL register should be turned on by the BIOS. For now let's
+ * just check its state and print errors in case something is wrong.
+ * Don't even try to turn it on.
+ */
+
+ DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ enum port port = intel_dig_port->port;
+ uint32_t val;
+ bool wait = false;
+
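+	/* If the DP transport is still enabled, disable the DDI buffer and
+	 * the transport before re-enabling with training pattern 1. */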
+ if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ val = I915_READ(DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+ }
+
+ val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+ POSTING_READ(DDI_BUF_CTL(port));
+
+ udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+ uint32_t val;
+
+ intel_ddi_post_disable(intel_encoder);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_MISC);
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(_FDI_RXA_MISC, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_PCDCLK;
+ I915_WRITE(_FDI_RXA_CTL, val);
+
+ val = I915_READ(_FDI_RXA_CTL);
+ val &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int type = intel_encoder->type;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+ intel_dp_check_link_status(intel_dp);
+}
+
+static void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ u32 temp, flags = 0;
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (temp & TRANS_DDI_PHSYNC)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (temp & TRANS_DDI_PVSYNC)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ pipe_config->adjusted_mode.flags |= flags;
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+ /* HDMI has nothing special to destroy, so we can go with this. */
+ intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ int type = encoder->type;
+ int port = intel_ddi_get_encoder_port(encoder);
+
+	if (type == INTEL_OUTPUT_UNKNOWN)
+		DRM_ERROR("compute_config() on unknown output!");
+
+ if (port == PORT_A)
+ pipe_config->cpu_transcoder = TRANSCODER_EDP;
+
+ if (type == INTEL_OUTPUT_HDMI)
+ return intel_hdmi_compute_config(encoder, pipe_config);
+ else
+ return intel_dp_compute_config(encoder, pipe_config);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+ .destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+ .mode_set = intel_ddi_mode_set,
+};
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *hdmi_connector = NULL;
+ struct intel_connector *dp_connector = NULL;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!dp_connector) {
+ kfree(intel_dig_port, sizeof(struct intel_digital_port));
+
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+ intel_encoder->compute_config = intel_ddi_compute_config;
+ intel_encoder->enable = intel_enable_ddi;
+ intel_encoder->pre_enable = intel_ddi_pre_enable;
+ intel_encoder->disable = intel_disable_ddi;
+ intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ intel_encoder->get_config = intel_ddi_get_config;
+
+ intel_dig_port->port = port;
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ (DDI_BUF_PORT_REVERSAL |
+ DDI_A_4_LANES);
+ intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+
+ intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_ddi_hot_plug;
+
+ if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port, sizeof(struct intel_digital_port));
+ kfree(dp_connector, sizeof(struct intel_connector));
+ return;
+ }
+
+ if (intel_encoder->type != INTEL_OUTPUT_EDP) {
+ hdmi_connector = kzalloc(sizeof(struct intel_connector),
+ GFP_KERNEL);
+ if (!hdmi_connector) {
+ return;
+ }
+
+ intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+ intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+ }
+}
diff --git a/usr/src/uts/intel/io/i915/intel_display.c b/usr/src/uts/intel/io/i915/intel_display.c
new file mode 100644
index 0000000..f7d58e5
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_display.c
@@ -0,0 +1,10275 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include "drm_edid.h"
+#include "drmP.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "drm_dp_helper.h"
+
+#include "drm_crtc_helper.h"
+
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+
+typedef struct {
+ int min, max;
+} intel_range_t;
+
+typedef struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+} intel_p2_t;
+
+#define INTEL_P2_NUM 2
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ intel_p2_t p2;
+};
+
+/* FDI */
+#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!HAS_PCH_SPLIT(dev));
+
+ return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+ if (IS_GEN5(dev)) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+ } else
+ return 27;
+}
+
+static const intel_limit_t intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+
+static const intel_limit_t intel_limits_g4x_sdvo = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
+ },
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ /* n.min raised from 1 to 2 to fix a high resolution issue */
+ .n = { .min = 2, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
+ },
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
+ },
+};
+
+static const intel_limit_t intel_limits_pineview_sdvo = {
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's Ncounter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_pineview_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const intel_limit_t intel_limits_ironlake_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ /* n.min raised from 1 to 2 to fix a high resolution issue */
+ .n = { .min = 2, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100mhz refclk limits. */
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+static const intel_limit_t intel_limits_vlv_dac = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 22, .max = 450 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+};
+
+static const intel_limit_t intel_limits_vlv_hdmi = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 60, .max = 300 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+};
+
+static const intel_limit_t intel_limits_vlv_dp = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 22, .max = 450 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+};
+
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ struct drm_device *dev = crtc->dev;
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_is_dual_link_lvds(dev)) {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else
+ limit = &intel_limits_ironlake_dac;
+
+ return limit;
+}
+
+static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_is_dual_link_lvds(dev))
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else /* The option is for other outputs */
+ limit = &intel_limits_i9xx_sdvo;
+
+ return limit;
+}
+
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
+{
+ struct drm_device *dev = crtc->dev;
+ const intel_limit_t *limit;
+
+ if (HAS_PCH_SPLIT(dev))
+ limit = intel_ironlake_limit(crtc, refclk);
+ else if (IS_G4X(dev)) {
+ limit = intel_g4x_limit(crtc);
+ } else if (IS_PINEVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_pineview_lvds;
+ else
+ limit = &intel_limits_pineview_sdvo;
+ } else if (IS_VALLEYVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
+ limit = &intel_limits_vlv_dac;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ limit = &intel_limits_vlv_hdmi;
+ else
+ limit = &intel_limits_vlv_dp;
+ } else if (!IS_GEN2(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i9xx_lvds;
+ else
+ limit = &intel_limits_i9xx_sdvo;
+ } else {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits_i8xx_lvds;
+ else
+ limit = &intel_limits_i8xx_dvo;
+ }
+ return limit;
+}
+
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static void pineview_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
+
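+/*
+ * Worked example of the i9xx divisor math below (illustrative values):
+ * with m1 = 10 and m2 = 5, the effective feedback divisor is
+ * m = 5 * (10 + 2) + (5 + 2) = 67; with refclk = 96000 kHz and n = 1,
+ * vco = 96000 * 67 / (1 + 2) = 2144000 kHz, and dot = vco / (p1 * p2).
+ */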
+static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
+{
+ return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
+static void i9xx_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = i9xx_dpll_compute_m(clock);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->type == type)
+ return true;
+
+ return false;
+}
+
+#define INTELPllInvalid(s) { DRM_DEBUG_KMS(s); return false; }
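+/* Note: INTELPllInvalid() expands to a statement block that logs the
+ * message and returns false from the calling function, so each check
+ * below bails out of intel_PLL_is_valid() on the first failure. */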
+/**
+ * Returns whether the given set of divisors is valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool intel_PLL_is_valid(struct drm_device *dev,
+ const intel_limit_t *limit,
+ const intel_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ INTELPllInvalid("m2 out of range\n");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ INTELPllInvalid("m1 out of range\n");
+ if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
+ INTELPllInvalid("m1 <= m2\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid("m out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid("n out of range\n");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid("dot out of range\n");
+
+ return true;
+}
+
+static bool
+i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ int err = target;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ /*
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
+ */
+ if (intel_is_dual_link_lvds(dev))
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ (void) memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ if (clock.m2 >= clock.m1)
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ i9xx_clock(refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+static bool
+pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ int err = target;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ /*
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
+ */
+ if (intel_is_dual_link_lvds(dev))
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ (void) memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ pineview_clock(refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+static bool
+g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ int max_n;
+ bool found;
+ /* (target >> 8) + (target >> 9) == target * 3 / 512, approximately target * 0.00586 */
+ int err_most = (target >> 8) + (target >> 9);
+ found = false;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_is_dual_link_lvds(dev))
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ (void) memset(best_clock, 0, sizeof(*best_clock));
+ max_n = limit->n.max;
+ /* based on hardware requirement prefer smaller n to precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement prefer larger m1,m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ i9xx_clock(refclk, &clock);
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
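+ /* Clamp the outer loop to this n: smaller
+ * n is preferred, so stop searching larger
+ * values once a valid match is found. */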
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+ return found;
+}
+
+static bool
+vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 m, n, fastclk;
+ u32 updrate, minupdate, p;
+ unsigned long bestppm, ppm, absppm;
+ int dotclk, flag;
+
+ flag = 0;
+ dotclk = target * 1000;
+ bestppm = 1000000;
+ ppm = absppm = 0;
+ fastclk = dotclk / (2*100);
+ updrate = 0;
+ minupdate = 19200;
+ n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
+ bestm1 = bestm2 = bestp1 = bestp2 = 0;
+
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
+ updrate = refclk / n;
+ for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
+ for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
+ if (p2 > 10)
+ p2 = p2 - 1;
+ p = p1 * p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
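+ /* m2 is fastclk * p * n / m1 in units of refclk,
+ * rounded to nearest: adding refclk before dividing
+ * by 2 * refclk rounds instead of truncating. */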
+ m2 = (((2*(fastclk * p * n / m1)) +
+ refclk) / (2*refclk));
+ m = m1 * m2;
+ vco = updrate * m;
+ if (vco >= limit->vco.min && vco < limit->vco.max) {
+ ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
+ absppm = (ppm > 0) ? ppm : (-ppm);
+ if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
+ bestppm = 0;
+ flag = 1;
+ }
+ if (absppm < bestppm - 10) {
+ bestppm = absppm;
+ flag = 1;
+ }
+ if (flag) {
+ bestn = n;
+ bestm1 = m1;
+ bestm2 = m2;
+ bestp1 = p1;
+ bestp2 = p2;
+ flag = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ best_clock->n = bestn;
+ best_clock->m1 = bestm1;
+ best_clock->m2 = bestm2;
+ best_clock->p1 = bestp1;
+ best_clock->p2 = bestp2;
+
+ return true;
+}
+
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ return intel_crtc->config.cpu_transcoder;
+}
+
+static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 frame, frame_reg = PIPEFRAME(pipe);
+
+ frame = I915_READ(frame_reg);
+
+ if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe. Needed for various bits of
+ * mode setting code.
+ */
+void intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipestat_reg = PIPESTAT(pipe);
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ironlake_wait_for_vblank(dev, pipe);
+ return;
+ }
+
+ /* Clear existing vblank status. Note this will clear any other
+ * sticky status fields as well.
+ *
+ * This races with i915_driver_irq_handler() with the result
+ * that either function could miss a vblank event. Here it is not
+ * fatal, as we will either wait upon the next vblank interrupt or
+ * timeout. Generally speaking intel_wait_for_vblank() is only
+ * called during modeset at which time the GPU should be idle and
+ * should *not* be performing page flips and thus not waiting on
+ * vblanks...
+ * Currently, the result of us stealing a vblank from the irq
+ * handler is that a single frame will be skipped during swapbuffers.
+ */
+ I915_WRITE(pipestat_reg,
+ I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+ /* Wait for vblank interrupt bit to set */
+ if (wait_for(I915_READ(pipestat_reg) &
+ PIPE_VBLANK_INTERRUPT_STATUS,
+ 50))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/**
+ * intel_wait_for_pipe_off - wait for pipe to turn off
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * On Gen4 and above:
+ * wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ * wait for the display line value to settle (it usually
+ * ends up stopping at the start of the next frame).
+ */
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ int reg = PIPECONF(cpu_transcoder);
+
+ /* Wait for the Pipe State to go off */
+ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+ 100))
+ DRM_ERROR("pipe_off wait timed out\n");
+ } else {
+ u32 last_line, line_mask;
+ int reg = PIPEDSL(pipe);
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
+ /* Wait for the display line to settle */
+ do {
+ last_line = I915_READ(reg) & line_mask;
+ mdelay(5);
+ } while (((I915_READ(reg) & line_mask) != last_line) &&
+ time_after(timeout, jiffies));
+ if (time_after(jiffies, timeout))
+ DRM_ERROR("pipe_off wait timed out\n");
+ }
+}
+
+/**
+ * ibx_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Returns true if @port is connected, false otherwise.
+ */
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ switch (port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG;
+ break;
+ default:
+ return true;
+ }
+ } else {
+ switch (port->port) {
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG_CPT;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG_CPT;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG_CPT;
+ break;
+ default:
+ return true;
+ }
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
+static const char *state_string(bool enabled)
+{
+ return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ if (cur_state != state)
+ DRM_ERROR("PLL state assertion failure (expected %s, current %s)",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+
+static struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (crtc->config.shared_dpll < 0)
+ return NULL;
+
+ return &dev_priv->shared_dplls[crtc->config.shared_dpll];
+}
+
+/* For ILK+ */
+static void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state)
+{
+ bool cur_state;
+ struct intel_dpll_hw_state hw_state;
+
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
+ return;
+ }
+
+ if (!pll) {
+ DRM_ERROR("asserting PCH PLL %s with no PLL\n", state_string(state));
+ return;
+ }
+
+ cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
+ if (cur_state != state)
+ DRM_DEBUG_KMS("%s assertion failure (expected %s, current %s)",
+ pll->name, state_string(state), state_string(cur_state));
+}
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ if (HAS_DDI(dev_priv->dev)) {
+ /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
+ } else {
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ }
+ if (cur_state != state)
+ DRM_ERROR("FDI TX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ if (cur_state != state)
+ DRM_ERROR("FDI RX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* ILK FDI PLL is always enabled */
+ if (dev_priv->info->gen == 5)
+ return;
+
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (HAS_DDI(dev_priv->dev))
+ return;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ if (!(val & FDI_TX_PLL_ENABLE))
+ DRM_ERROR("FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ if (!(val & FDI_RX_PLL_ENABLE))
+ DRM_ERROR("FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int pp_reg, lvds_reg;
+ u32 val;
+ enum pipe panel_pipe = PIPE_A;
+ bool locked = true;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ pp_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ pp_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ val = I915_READ(pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+ panel_pipe = PIPE_B;
+
+ if (panel_pipe == pipe && locked)
+ DRM_ERROR("panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
+
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+
+ /* if we need the pipe A quirk it must be always on */
+ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ state = true;
+
+ if (!intel_display_power_enabled(dev_priv->dev,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
+ cur_state = false;
+ } else {
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ }
+
+ if (cur_state != state)
+ DRM_ERROR("pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+
+static void assert_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+ if (cur_state != state)
+ DRM_ERROR("plane %c assertion failure (expected %s, current %s)\n",
+ plane_name(plane), state_string(state), state_string(cur_state));
+}
+
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int reg, i;
+ u32 val;
+ int cur_pipe;
+
+ /* Planes are fixed to pipes on ILK+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ reg = DSPCNTR(pipe);
+ val = I915_READ(reg);
+ if (val & DISPLAY_PLANE_ENABLE)
+ DRM_ERROR("plane %c assertion failure, should be disabled but is not\n",
+ plane_name(pipe));
+ return;
+ }
+
+ /* Need to check both planes against the pipe */
+ for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ reg = DSPCNTR(i);
+ val = I915_READ(reg);
+ cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+ if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
+ DRM_ERROR("plane %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(i), pipe_name(pipe));
+ }
+}
+
+static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int reg, i;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev)) {
+ for (i = 0; i < dev_priv->num_plane; i++) {
+ reg = SPCNTR(pipe, i);
+ val = I915_READ(reg);
+ if (val & SP_ENABLE)
+ DRM_ERROR("sprite %c assertion failure, should be off on pipe %c but is still active\n",
+ sprite_name(pipe, i), pipe_name(pipe));
+ }
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ reg = SPRCTL(pipe);
+ val = I915_READ(reg);
+ if (val & SPRITE_ENABLE)
+ DRM_ERROR("sprite %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(pipe), pipe_name(pipe));
+ } else if (INTEL_INFO(dev)->gen >= 5) {
+ reg = DVSCNTR(pipe);
+ val = I915_READ(reg);
+ if (val & DVS_ENABLE)
+ DRM_ERROR("sprite %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(pipe), pipe_name(pipe));
+ }
+}
+
+static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
+ return;
+ }
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ if (!enabled)
+ DRM_ERROR("PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+ bool enabled;
+
+ reg = PCH_TRANSCONF(pipe);
+ val = I915_READ(reg);
+ enabled = !!(val & TRANS_ENABLE);
+ if (enabled)
+ DRM_ERROR("transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
+static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 port_sel, u32 val)
+{
+ if ((val & DP_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+ u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+ if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+ return false;
+ } else {
+ if ((val & DP_PIPE_MASK) != (pipe << 30))
+ return false;
+ }
+ return true;
+}
+
+static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & SDVO_ENABLE) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & LVDS_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & ADPA_DAC_ENABLE) == 0)
+ return false;
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
+ return false;
+ }
+ return true;
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg, u32 port_sel)
+{
+ u32 val = I915_READ(reg);
+ if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
+ DRM_ERROR("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+
+ if (HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ && (val & DP_PIPEB_SELECT))
+ DRM_ERROR("IBX PCH dp port still using transcoder B\n");
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (hdmi_pipe_enabled(dev_priv, pipe, val))
+ DRM_ERROR("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+
+ if (HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+ && (val & SDVO_PIPE_B_SELECT))
+ DRM_ERROR("IBX PCH hdmi port still using transcoder B\n");
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ if (adpa_pipe_enabled(dev_priv, pipe, val))
+ DRM_ERROR("PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ if (lvds_pipe_enabled(dev_priv, pipe, val))
+ DRM_ERROR("PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+}
+
+/**
+ * intel_enable_pll - enable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
+ * make sure the PLL reg is writable first though, since the panel write
+ * protect mechanism may be enabled.
+ *
+ * Note! This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
+ */
+static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /* No really, not for ILK+ */
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+ assert_panel_unlocked(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+}
+
+/**
+ * intel_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe, making sure the pipe is off first.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
+{
+ u32 port_mask;
+
+ if (!port)
+ port_mask = DPLL_PORTB_READY_MASK;
+ else
+ port_mask = DPLL_PORTC_READY_MASK;
+
+ if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
+ DRM_ERROR("timed out waiting for port %c ready: 0x%08x\n",
+ 'B' + port, I915_READ(DPLL(0)));
+}
+
+/**
+ * ironlake_enable_shared_dpll - enable a CRTC's shared (PCH) DPLL
+ * @crtc: CRTC whose shared DPLL should be enabled
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+ /* PCH PLLs only available on ILK, SNB and IVB */
+ BUG_ON(dev_priv->info->gen < 5);
+ if (pll == NULL) {
+ DRM_ERROR("pll is NULL");
+ return;
+ }
+ if (pll->refcount == 0) {
+ DRM_ERROR("pll refcount equal to 0");
+ return;
+ }
+
+ DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
+ pll->name, pll->active, pll->on,
+ crtc->base.base.id);
+
+ if (pll->active++) {
+ WARN_ON(!pll->on);
+ assert_shared_dpll_enabled(dev_priv, pll);
+ return;
+ }
+ WARN_ON(pll->on);
+
+ DRM_DEBUG_KMS("enabling %s\n", pll->name);
+ pll->enable(dev_priv, pll);
+ pll->on = true;
+}
+
+static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+ if (pll == NULL) {
+ DRM_ERROR("pll is NULL");
+ return;
+ }
+
+ if (pll->refcount == 0) {
+ DRM_ERROR("pll refcount equal to 0");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
+ pll->name, pll->active, pll->on,
+ crtc->base.base.id);
+
+ if (pll->active == 0) {
+ assert_shared_dpll_disabled(dev_priv, pll);
+ return;
+ }
+
+ assert_shared_dpll_enabled(dev_priv, pll);
+ WARN_ON(!pll->on);
+ if (--pll->active)
+ return;
+
+ DRM_DEBUG_KMS("disabling %s\n", pll->name);
+ pll->disable(dev_priv, pll);
+ pll->on = false;
+}
+
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t reg, val, pipeconf_val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* Make sure PCH DPLL is enabled */
+ assert_shared_dpll_enabled(dev_priv,
+ intel_crtc_to_shared_dpll(intel_crtc));
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ if (HAS_PCH_CPT(dev)) {
+ /* Workaround: Set the timing override bit before enabling the
+ * pch transcoder. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
+ }
+
+ reg = PCH_TRANSCONF(pipe);
+ val = I915_READ(reg);
+ pipeconf_val = I915_READ(PIPECONF(pipe));
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /*
+ * Make the BPC in the transcoder consistent with
+ * that in the pipeconf register.
+ */
+ val &= ~PIPECONF_BPC_MASK;
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
+ }
+
+ val &= ~TRANS_INTERLACE_MASK;
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
+ if (HAS_PCH_IBX(dev_priv->dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+ val |= TRANS_LEGACY_INTERLACED_ILK;
+ else
+ val |= TRANS_INTERLACED;
+ } else {
+ val |= TRANS_PROGRESSIVE;
+ }
+
+ I915_WRITE(reg, val | TRANS_ENABLE);
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
+}
+
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 val, pipeconf_val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, (enum pipe)cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, (enum pipe)TRANSCODER_A);
+
+ /* Workaround: set timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+ PIPECONF_INTERLACED_ILK)
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
+ I915_WRITE(LPT_TRANSCONF, val);
+ if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t reg, val;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = PCH_TRANSCONF(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
+
+ if (!HAS_PCH_IBX(dev)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(reg, val);
+ }
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(LPT_TRANSCONF);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(LPT_TRANSCONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = I915_READ(_TRANSA_CHICKEN2);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ I915_WRITE(_TRANSA_CHICKEN2, val);
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to enable
+ * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ *
+ * Enable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe is actually running (i.e. first vblank) before
+ * returning.
+ */
+static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool pch_port)
+{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ enum pipe pch_transcoder;
+ int reg;
+ u32 val;
+
+ assert_planes_disabled(dev_priv, pipe);
+ assert_sprites_disabled(dev_priv, pipe);
+
+ if (HAS_PCH_LPT(dev_priv->dev))
+ pch_transcoder = (enum pipe)TRANSCODER_A;
+ else
+ pch_transcoder = pipe;
+
+ /*
+ * A pipe without a PLL won't actually be able to drive bits from
+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't
+ * need the check.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv->dev))
+ assert_pll_enabled(dev_priv, pipe);
+ else {
+ if (pch_port) {
+ /* if driving the PCH, we need FDI enabled */
+ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+ assert_fdi_tx_pll_enabled(dev_priv,
+ (enum pipe) cpu_transcoder);
+ }
+ /* FIXME: assert CPU port conditions for SNB+ */
+ }
+
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | PIPECONF_ENABLE);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int reg;
+ u32 val;
+
+ /*
+ * Make sure planes won't keep trying to pump pixels to us,
+ * or we might hang the display.
+ */
+ assert_planes_disabled(dev_priv, pipe);
+ assert_sprites_disabled(dev_priv, pipe);
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ reg = PIPECONF(cpu_transcoder);
+ val = I915_READ(reg);
+ if ((val & PIPECONF_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev_priv->dev, pipe);
+}
+
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ if (dev_priv->info->gen >= 4)
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+ else
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+}
+
+/**
+ * intel_enable_plane - enable a display plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+static void intel_enable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* If the pipe isn't enabled, we can't pump pixels and may hang */
+ assert_pipe_enabled(dev_priv, pipe);
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ if (val & DISPLAY_PLANE_ENABLE)
+ return;
+
+ I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_plane - disable a display plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+static void intel_disable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+static bool need_vtd_wa(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
+int
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 alignment;
+ int ret;
+
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ alignment = 128 * 1024;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ alignment = 4 * 1024;
+ else
+ alignment = 64 * 1024;
+ break;
+ case I915_TILING_X:
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ break;
+ case I915_TILING_Y:
+ /* Despite checking this in framebuffer_init, userspace can
+ * still screw us over and change the tiling after the fact.
+ * Only pinned buffers can't change their tiling. */
+ DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
+ return -EINVAL;
+ default:
+ BUG();
+ return -EINVAL;
+ }
+
+ /* Note that the w/a also requires 64 PTE of padding following the
+ * bo. We currently fill all unused PTE with the shadow page and so
+ * we should always have valid PTE following the scanout preventing
+ * the VT-d warning.
+ */
+ if (need_vtd_wa(dev) && alignment < 256 * 1024)
+ alignment = 256 * 1024;
+
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ if (ret)
+ goto err_interruptible;
+
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always install
+ * a fence as the cost is not that onerous.
+ */
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
+
+ i915_gem_object_pin_fence(obj);
+
+ dev_priv->mm.interruptible = true;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_interruptible:
+ dev_priv->mm.interruptible = true;
+ return ret;
+}
+
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_fence(obj);
+ i915_gem_object_unpin(obj);
+}
+
+/* Computes the linear offset to the base tile and adjusts x, y. Bytes per
+ * pixel is assumed to be a power-of-two. */
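+/*
+ * Worked example (illustrative values): for a tiled framebuffer with
+ * cpp = 4 and pitch = 8192, a pixel at (700, 20) gives tile_rows = 2
+ * (y becomes 4) and tiles = 700 / 128 = 5 (x becomes 60), so the
+ * returned base-tile offset is 2 * 8192 * 8 + 5 * 4096 = 151552 bytes.
+ */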
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int cpp,
+ unsigned int pitch)
+{
+ if (tiling_mode != I915_TILING_NONE) {
+ unsigned int tile_rows, tiles;
+
+ tile_rows = *y / 8;
+ *y %= 8;
+
+ tiles = *x / (512/cpp);
+ *x %= 512/cpp;
+
+ return tile_rows * pitch * 8 + tiles * 4096;
+ } else {
+ unsigned int offset;
+
+ offset = *y * pitch + *x * cpp;
+ *y = 0;
+ *x = (offset & 4095) / cpp;
+ return offset & -4096;
+ }
+}
+
+static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long linear_offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRX555;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ default:
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+
+ if (IS_G4X(dev))
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(reg, dspcntr);
+
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_crtc->dspaddr_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
+ } else {
+ intel_crtc->dspaddr_offset = linear_offset;
+ }
+
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ } else
+ I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+ POSTING_READ(reg);
+
+ return 0;
+}
+
+static int ironlake_update_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long linear_offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ case 2:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISPPLANE_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBX888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBX101010;
+ break;
+ default:
+ DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
+ /* Trickle feed must always be disabled on these display planes. */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(reg, dspcntr);
+
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ intel_crtc->dspaddr_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
+
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+ } else {
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
+ }
+ POSTING_READ(reg);
+
+ return 0;
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+ intel_increase_pllclock(crtc);
+
+ return dev_priv->display.update_plane(crtc, fb, x, y);
+}
+
+void intel_display_handle_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+
+ /*
+ * Flips in the rings have been nuked by the reset,
+ * so complete all pending flips so that user space
+ * will get its events and not get stuck.
+ *
+ * Also update the base address of all primary
+ * planes to the last fb to make sure we're
+ * showing the correct fb after a reset.
+ *
+ * Need to make two loops over the crtcs so that we
+ * don't try to grab a crtc mutex before the
+ * pending_flip_queue really got woken up.
+ */
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum plane plane = intel_crtc->plane;
+
+ intel_prepare_page_flip(dev, plane);
+ intel_finish_page_flip_plane(dev, plane);
+ }
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ mutex_lock(&crtc->mutex);
+ if (intel_crtc->active)
+ dev_priv->display.update_plane(crtc, crtc->fb,
+ crtc->x, crtc->y);
+ mutex_unlock(&crtc->mutex);
+ }
+}
+
+static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ bool was_interruptible = dev_priv->mm.interruptible;
+ int ret;
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ dev_priv->mm.interruptible = false;
+ ret = i915_gem_object_finish_gpu(obj);
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (intel_crtc->pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_framebuffer *old_fb;
+ int ret;
+
+ /* no fb bound */
+ if (!fb) {
+ DRM_ERROR("No FB bound\n");
+ return 0;
+ }
+
+ if (intel_crtc->plane >= INTEL_INFO(dev)->num_pipes) {
+ DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
+ plane_name(intel_crtc->plane),
+ INTEL_INFO(dev)->num_pipes);
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = intel_pin_and_fence_fb_obj(dev,
+ to_intel_framebuffer(fb)->obj,
+ NULL);
+ if (ret != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("pin & fence failed\n");
+ return ret;
+ }
+
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
+ if (ret) {
+ intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("failed to update base address\n");
+ return ret;
+ }
+
+ old_fb = crtc->fb;
+ crtc->fb = fb;
+ crtc->x = x;
+ crtc->y = y;
+
+ if (old_fb) {
+ if (intel_crtc->active && old_fb != fb)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ }
+
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ intel_crtc_update_sarea_pos(crtc, x, y);
+
+ return 0;
+}
+
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (IS_IVYBRIDGE(dev)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ }
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ udelay(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
+ FDI_FE_ERRC_ENABLE);
+}
+
+static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
+{
+ return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
+}
+
+static void ivb_modeset_global_resources(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+ struct intel_crtc *pipe_C_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+ uint32_t temp;
+
+ /*
+ * When everything is off disable fdi C so that we could enable fdi B
+ * with all lanes. Note that we don't care about enabled pipes without
+ * an enabled pch encoder.
+ */
+ if (!pipe_has_enabled_pch(pipe_B_crtc) &&
+ !pipe_has_enabled_pch(pipe_C_crtc)) {
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("disabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ }
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp, tries;
+
+ /* FDI needs bits from pipe & plane first */
+ assert_pipe_enabled(dev_priv, pipe);
+ assert_plane_enabled(dev_priv, plane);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+ POSTING_READ(reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ /* Ironlake workaround, enable clock pointer after FDI enable */
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done\n");
+
+}
+
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
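+
+/*
+ * The SNB training loops below step through this table in order,
+ * raising the voltage swing/pre-emphasis level after each failed
+ * attempt until the FDI receiver reports bit or symbol lock.
+ */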
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i, retry;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+ I915_READ(FDI_RX_IIR(pipe)));
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(500);
+
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK ||
+ (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(500);
+
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp | FDI_PCDCLK);
+
+ POSTING_READ(reg);
+ udelay(200);
+
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+ }
+}
+
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ POSTING_READ(reg);
+ udelay(100);
+}
+
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ }
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
+}
+
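+/*
+ * Check whether a page flip is still outstanding on this crtc. A flip
+ * that was in flight when a GPU reset fired can never complete, so
+ * report no pending flip once the reset counter has moved on.
+ */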
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long flags;
+ bool pending;
+
+ if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+ intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ return false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return pending;
+}
+
+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+ /* LINTED */
+ int ret;
+
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (crtc->fb == NULL)
+ return;
+
+ WARN_ON(mutex_is_locked(&dev_priv->pending_flip_queue.lock));
+
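+ /*
+  * DRM_WAIT stands in here for the upstream wait_event() call,
+  * sleeping on pending_flip_queue until no flip remains outstanding
+  * on this crtc.
+  */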
+ DRM_WAIT(ret, &dev_priv->pending_flip_queue,
+ !intel_crtc_has_pending_flip(crtc));
+
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* It is necessary to ungate the pixclk gate prior to programming
+ * the divisors, and gate it back when it is done.
+ */
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ /* Disable SSCCTL */
+ intel_sbi_write(dev_priv, SBI_SSCCTL6,
+ intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+ SBI_SSCCTL_DISABLE,
+ SBI_ICLK);
+
+ /* 20MHz is a corner case which is out of range for the 7-bit divisor */
+ if (crtc->mode.clock == 20000) {
+ auxdiv = 1;
+ divsel = 0x41;
+ phaseinc = 0x20;
+ } else {
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but crtc->mode.clock is in KHz. To compute the divisors
+ * we divide one by the other, so the virtual clock root
+ * frequency is converted to KHz here to preserve precision.
+ */
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor, msb_divisor_value, pi_value;
+
+ desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+ msb_divisor_value = desired_divisor / iclk_pi_range;
+ pi_value = desired_divisor % iclk_pi_range;
+
+ auxdiv = 0;
+ divsel = msb_divisor_value - 2;
+ phaseinc = pi_value;
+ }
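+
+ /*
+  * Worked example, assuming a 108 MHz pixel clock
+  * (crtc->mode.clock == 108000): desired_divisor =
+  * 172800000 / 108000 = 1600, msb_divisor_value = 1600 / 64 = 25 and
+  * pi_value = 1600 % 64 = 0, giving auxdiv = 0, divsel = 23 and
+  * phaseinc = 0.
+  */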
+
+ /* This should not happen with any sane values */
+ WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ crtc->mode.clock,
+ auxdiv,
+ divsel,
+ phasedir,
+ phaseinc);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+ /* Wait for initialization time */
+ udelay(24);
+
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+ enum pipe pch_transcoder)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+
+ I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
+ I915_READ(HTOTAL(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
+ I915_READ(HBLANK(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
+ I915_READ(HSYNC(cpu_transcoder)));
+
+ I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
+ I915_READ(VTOTAL(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
+ I915_READ(VBLANK(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
+ I915_READ(VSYNC(cpu_transcoder)));
+ I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ I915_READ(VSYNCSHIFT(cpu_transcoder)));
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ assert_pch_transcoder_disabled(dev_priv, pipe);
+
+ /* Write the TU size bits before fdi link training, so that error
+ * detection works. */
+ I915_WRITE(FDI_RX_TUSIZE1(pipe),
+ I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+ /* For PCH output, training FDI link */
+ dev_priv->display.fdi_link_train(crtc);
+
+ /* XXX: PCH PLLs can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+ * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+ * unconditionally resets the pll - we need that to have the right LVDS
+ * enable sequence. */
+ ironlake_enable_shared_dpll(intel_crtc);
+
+ if (HAS_PCH_CPT(dev)) {
+ u32 sel;
+
+ temp = I915_READ(PCH_DPLL_SEL);
+ temp |= TRANS_DPLL_ENABLE(pipe);
+ sel = TRANS_DPLLB_SEL(pipe);
+ if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
+ temp |= sel;
+ else
+ temp &= ~sel;
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
+
+ /* set transcoder timing */
+ assert_panel_unlocked(dev_priv, pipe);
+ ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
+
+ intel_fdi_normal_train(crtc);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
+ temp |= (TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING);
+ temp |= bpc << 9; /* same format but at 11:9 */
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ temp |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ temp |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ temp |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ BUG();
+ }
+
+ I915_WRITE(reg, temp);
+ }
+
+ ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+ assert_pch_transcoder_disabled(dev_priv, (enum pipe) TRANSCODER_A);
+
+ lpt_program_iclkip(crtc);
+
+ /* Set transcoder timing. */
+ ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
+
+ lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+}
+
+static void intel_put_shared_dpll(struct intel_crtc *crtc)
+{
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+
+ if (pll == NULL)
+ return;
+
+ if (pll->refcount == 0) {
+ DRM_ERROR("bad PCH PLL refcount\n");
+ return;
+ }
+
+ if (--pll->refcount == 0) {
+ WARN_ON(pll->on);
+ WARN_ON(pll->active);
+ if (pll->on || pll->active)
+ DRM_ERROR("PCH PLL refcount is 0, but it's still active");
+ }
+
+ crtc->config.shared_dpll = DPLL_ID_PRIVATE;
+}
+
+static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ enum intel_dpll_id i;
+
+ if (pll) {
+ DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
+ crtc->base.base.id, pll->name);
+ intel_put_shared_dpll(crtc);
+ }
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = (enum intel_dpll_id)crtc->pipe;
+ pll = &dev_priv->shared_dplls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+ crtc->base.base.id, pll->name);
+
+ goto found;
+ }
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ /* Only want to check enabled timings first */
+ if (pll->refcount == 0)
+ continue;
+
+ if ((dpll & 0x7fffffff) == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) &&
+ fp == I915_READ(PCH_FP0(pll->id))) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
+ crtc->base.base.id,
+ pll->name, pll->refcount, pll->active);
+
+ goto found;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+ if (pll->refcount == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
+ crtc->base.base.id, pll->name);
+ goto found;
+ }
+ }
+
+ return NULL;
+
+found:
+ crtc->config.shared_dpll = i;
+ DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
+ pipe_name(crtc->pipe));
+
+ if (pll->active == 0) {
+ memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
+ sizeof(pll->hw_state));
+
+ DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+ WARN_ON(pll->on);
+ assert_shared_dpll_disabled(dev_priv, pll);
+
+ /* Wait for the clocks to stabilize before rewriting the regs */
+ I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(150);
+
+ I915_WRITE(PCH_FP0(pll->id), fp);
+ I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
+ }
+ pll->refcount++;
+
+ return pll;
+}
+
+static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dslreg = PIPEDSL(pipe);
+ u32 temp;
+
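+ /*
+  * Sample the pipe scanline counter and verify that it advances; if
+  * it is still stuck after two 5 ms waits, the pipe never started
+  * scanning out after the mode set.
+  */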
+ temp = I915_READ(dslreg);
+ udelay(500);
+ if (wait_for(I915_READ(dslreg) != temp, 5)) {
+ if (wait_for(I915_READ(dslreg) != temp, 5))
+ DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+ }
+}
+
+static void ironlake_pfit_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ if (crtc->config.pch_pfit.size) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+ PF_PIPE_SEL_IVB(pipe));
+ else
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
+ I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
+ }
+}
+
+static void intel_enable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_plane *intel_plane;
+
+ list_for_each_entry(intel_plane, struct intel_plane, &dev->mode_config.plane_list, base.head)
+ if (intel_plane->pipe == pipe)
+ intel_plane_restore(&intel_plane->base);
+}
+
+static void intel_disable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_plane *intel_plane;
+
+ list_for_each_entry(intel_plane, struct intel_plane, &dev->mode_config.plane_list, base.head)
+ if (intel_plane->pipe == pipe)
+ intel_plane_disable(&intel_plane->base);
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 temp;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+
+ intel_update_watermarks(dev);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
+
+ if (intel_crtc->config.has_pch_encoder) {
+ /* Note: FDI PLL enabling _must_ be done before we enable the
+ * cpu pipes, hence this is separate from all the other fdi/pch
+ * enabling. */
+ ironlake_fdi_pll_enable(intel_crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ ironlake_pfit_enable(intel_crtc);
+
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
+ intel_enable_pipe(dev_priv, pipe,
+ intel_crtc->config.has_pch_encoder);
+ intel_enable_plane(dev_priv, plane, pipe);
+ intel_enable_planes(crtc);
+ intel_crtc_update_cursor(crtc, true);
+
+ if (intel_crtc->config.has_pch_encoder)
+ ironlake_pch_enable(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ if (HAS_PCH_CPT(dev))
+ cpt_verify_modeset(dev, intel_crtc->pipe);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+ * as the first vblank happened, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+/* IPS only exists on ULT machines and is tied to pipe A. */
+static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
+{
+ return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
+}
+
+static void hsw_enable_ips(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+ if (!crtc->config.ips_enabled)
+ return;
+
+ /* We can only enable IPS after we enable a plane and wait for a vblank.
+ * We guarantee that the plane is enabled by calling intel_enable_ips
+ * only after intel_enable_plane. And intel_enable_plane already waits
+ * for a vblank, so all we need to do here is to enable the IPS bit. */
+ assert_plane_enabled(dev_priv, crtc->plane);
+ I915_WRITE(IPS_CTL, IPS_ENABLE);
+}
+
+static void hsw_disable_ips(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!crtc->config.ips_enabled)
+ return;
+
+ assert_plane_enabled(dev_priv, crtc->plane);
+ I915_WRITE(IPS_CTL, 0);
+
+ /* We need to wait for a vblank before we can disable the plane. */
+ intel_wait_for_vblank(dev, crtc->pipe);
+}
+
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+
+ intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+ if (intel_crtc->config.has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+
+ intel_update_watermarks(dev);
+
+ if (intel_crtc->config.has_pch_encoder)
+ dev_priv->display.fdi_link_train(crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ intel_ddi_enable_pipe_clock(intel_crtc);
+
+ ironlake_pfit_enable(intel_crtc);
+
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
+ intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_enable_transcoder_func(crtc);
+
+ intel_enable_pipe(dev_priv, pipe,
+ intel_crtc->config.has_pch_encoder);
+ intel_enable_plane(dev_priv, plane, pipe);
+ intel_enable_planes(crtc);
+ intel_crtc_update_cursor(crtc, true);
+
+ hsw_enable_ips(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder)
+ lpt_pch_enable(crtc);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+ * as the first vblank happened, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+static void ironlake_pfit_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ /* To avoid upsetting the power well on haswell only disable the pfit if
+ * it's in use. The hw state code will make sure we get this right. */
+ if (crtc->config.pch_pfit.size) {
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_POS(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
+ }
+}
+
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 reg, temp;
+
+ if (!intel_crtc->active)
+ return;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_crtc_update_cursor(crtc, false);
+ intel_disable_planes(crtc);
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (intel_crtc->config.has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ ironlake_pfit_disable(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ ironlake_fdi_disable(crtc);
+
+ ironlake_disable_pch_transcoder(dev_priv, pipe);
+ intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
+ I915_WRITE(reg, temp);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ }
+
+ /* disable PCH DPLL */
+ intel_disable_shared_dpll(intel_crtc);
+
+ ironlake_fdi_pll_disable(intel_crtc);
+ }
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+ if (!intel_crtc->active)
+ return;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+
+ /* FBC must be disabled before disabling the plane on HSW. */
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ hsw_disable_ips(intel_crtc);
+
+ intel_crtc_update_cursor(crtc, false);
+ intel_disable_planes(crtc);
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ if (intel_crtc->config.has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
+ intel_disable_pipe(dev_priv, pipe);
+
+ intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+ ironlake_pfit_disable(intel_crtc);
+
+ intel_ddi_disable_pipe_clock(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ lpt_disable_pch_transcoder(dev_priv);
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ intel_ddi_fdi_disable(crtc);
+ }
+
+ intel_crtc->active = false;
+ intel_update_watermarks(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void ironlake_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ intel_put_shared_dpll(intel_crtc);
+}
+
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+ intel_ddi_put_crtc_pll(crtc);
+}
+
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+ if (!enable && intel_crtc->overlay) {
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
+}
+
+/**
+ * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 cntl = I915_READ(CURCNTR(pipe));
+
+ if ((cntl & CURSOR_MODE) == 0) {
+ u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+ I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+ I915_WRITE(CURCNTR(pipe), cntl);
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+ }
+}
+
+static void i9xx_pfit_enable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc_config *pipe_config = &crtc->config;
+
+ if (!crtc->config.gmch_pfit.control)
+ return;
+
+ /*
+ * The panel fitter should only be adjusted whilst the pipe is disabled,
+ * according to register description and PRM.
+ */
+ WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+
+ /* Border color in case we don't scale up to the full screen. Black by
+ * default, change to something else for debugging. */
+ I915_WRITE(BCLRPAT(crtc->pipe), 0);
+}
+
+static void valleyview_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
+
+ intel_enable_pll(dev_priv, pipe);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ /* VLV wants encoder enabling _before_ the pipe is up. */
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+
+ i9xx_pfit_enable(intel_crtc);
+
+ intel_crtc_load_lut(crtc);
+
+ intel_enable_pipe(dev_priv, pipe, false);
+ intel_enable_plane(dev_priv, plane, pipe);
+ intel_enable_planes(crtc);
+ intel_crtc_update_cursor(crtc, true);
+
+ intel_update_fbc(dev);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ WARN_ON(!crtc->enabled);
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ intel_enable_pll(dev_priv, pipe);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
+
+ i9xx_pfit_enable(intel_crtc);
+
+ intel_crtc_load_lut(crtc);
+
+ intel_enable_pipe(dev_priv, pipe, false);
+ intel_enable_plane(dev_priv, plane, pipe);
+ intel_enable_planes(crtc);
+ /* The fixup needs to happen before cursor is enabled */
+ if (IS_G4X(dev))
+ g4x_fixup_plane(dev_priv, pipe);
+ intel_crtc_update_cursor(crtc, true);
+
+ /* Give the overlay scaler a chance to enable if it's on this pipe */
+ intel_crtc_dpms_overlay(intel_crtc, true);
+
+ intel_update_fbc(dev);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
+}
+
+static void i9xx_pfit_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!crtc->config.gmch_pfit.control)
+ return;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
+ I915_READ(PFIT_CONTROL));
+ I915_WRITE(PFIT_CONTROL, 0);
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_encoder *encoder;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+
+ if (!intel_crtc->active)
+ return;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->disable(encoder);
+
+ /* Give the overlay scaler a chance to disable if it's on this pipe */
+ intel_crtc_wait_for_pending_flips(crtc);
+ drm_vblank_off(dev, pipe);
+
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
+
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ intel_crtc_update_cursor(crtc, false);
+ intel_disable_planes(crtc);
+ intel_disable_plane(dev_priv, plane, pipe);
+
+ intel_disable_pipe(dev_priv, pipe);
+
+ i9xx_pfit_disable(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
+ intel_disable_pll(dev_priv, pipe);
+
+ intel_crtc->active = false;
+ intel_update_fbc(dev);
+ intel_update_watermarks(dev);
+}
+
+static void i9xx_crtc_off(struct drm_crtc *crtc)
+{
+}
+
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+ bool enabled)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
+ master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
+ master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
+ break;
+ default:
+ DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
+ break;
+ }
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ bool enable = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+ enable |= intel_encoder->connectors_active;
+
+ if (enable)
+ dev_priv->display.crtc_enable(crtc);
+ else
+ dev_priv->display.crtc_disable(crtc);
+
+ intel_crtc_update_sarea(crtc, enable);
+}
+
+static void intel_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* crtc should still be enabled when we disable it. */
+ WARN_ON(!crtc->enabled);
+
+ dev_priv->display.crtc_disable(crtc);
+ intel_crtc->eld_vld = false;
+ intel_crtc_update_sarea(crtc, false);
+ dev_priv->display.off(crtc);
+
+ assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+ assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
+
+ if (crtc->fb) {
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ crtc->fb = NULL;
+ }
+
+ /* Update computed state. */
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ if (connector->encoder->crtc != crtc)
+ continue;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ to_intel_encoder(connector->encoder)->connectors_active = false;
+ }
+}
+
+void intel_modeset_disable(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled)
+ intel_crtc_disable(crtc);
+ }
+}
+
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder, intel_encoder->type_size);
+}
+
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
+{
+ if (mode == DRM_MODE_DPMS_ON) {
+ encoder->connectors_active = true;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ } else {
+ encoder->connectors_active = false;
+
+ intel_crtc_update_dpms(encoder->base.crtc);
+ }
+}
+
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
+{
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc;
+ bool encoder_enabled;
+ enum pipe pipe;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+
+ if (connector->base.dpms == DRM_MODE_DPMS_OFF)
+ DRM_ERROR("wrong connector dpms state\n");
+ if (connector->base.encoder != &encoder->base)
+ DRM_ERROR("active connector not linked to encoder\n");
+ if (!encoder->connectors_active)
+ DRM_ERROR("encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ if (!encoder_enabled)
+ DRM_ERROR("encoder not enabled\n");
+ if (!encoder->base.crtc) {
+ DRM_ERROR("crtc is NULL");
+ return;
+ }
+
+ crtc = encoder->base.crtc;
+
+ if (!crtc->enabled)
+ DRM_ERROR("crtc not enabled\n");
+ if (!to_intel_crtc(crtc)->active)
+ DRM_ERROR("crtc not active\n");
+ if (pipe != to_intel_crtc(crtc)->pipe)
+ DRM_ERROR("encoder active on the wrong pipe\n");
+ }
+}
+
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ /* All the simple cases only support two dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ if (encoder->base.crtc)
+ intel_encoder_dpms(encoder, mode);
+ /* LINTED */
+ else
+ WARN_ON(encoder->connectors_active != false);
+
+ intel_modeset_check_state(connector->dev);
+}
+
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
+
+ return encoder->get_hw_state(encoder, &pipe);
+}
+
+static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *pipe_B_crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+ DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ if (pipe_config->fdi_lanes > 4) {
+ DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+
+ if (IS_HASWELL(dev)) {
+ if (pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ if (INTEL_INFO(dev)->num_pipes == 2)
+ return true;
+
+ /* Ivybridge 3 pipe is really complicated */
+ switch (pipe) {
+ case PIPE_A:
+ return true;
+ case PIPE_B:
+ if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+ pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+ return true;
+ case PIPE_C:
+ if (!pipe_has_enabled_pch(pipe_B_crtc) ||
+ pipe_B_crtc->config.fdi_lanes <= 2) {
+ if (pipe_config->fdi_lanes > 2) {
+ DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return false;
+ }
+ } else {
+ DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ return false;
+ }
+ return true;
+ default:
+ BUG();
+ }
+ return false;
+}
+
+int i915_lane_workaround = 0;
+int i915_default_lanes = 4;
+
+#define RETRY 1
+static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ int lane, link_bw, fdi_dotclock;
+ bool setup_ok, needs_recompute = false;
+
+retry:
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
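+ /*
+  * For example, assuming intel_fdi_link_freq() reports the common
+  * divider of 27: 27 * 100 MHz = 2.7 GHz, and dividing by 10 bits per
+  * octet gives 270 MHz, i.e. link_bw = 270000 in the same KHz units as
+  * the mode clock. Each lane carries one output octet per symbol tick.
+  */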
+
+ fdi_dotclock = adjusted_mode->clock;
+ fdi_dotclock /= pipe_config->pixel_multiplier;
+
+ if (i915_lane_workaround)
+ lane = i915_default_lanes;
+ else
+ lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
+
+ pipe_config->fdi_lanes = lane;
+
+ intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+ link_bw, &pipe_config->fdi_m_n);
+
+ setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
+ intel_crtc->pipe, pipe_config);
+ if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
+ pipe_config->pipe_bpp -= 2*3;
+ DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
+ needs_recompute = true;
+ pipe_config->bw_constrained = true;
+
+ goto retry;
+ }
+
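+ /*
+  * For example, a mode that would need more than 4 lanes at 24 bpp
+  * fails the lane check above, so pipe_bpp drops from 24 to 18 (two
+  * bits per channel) and the lane count is recomputed; returning
+  * RETRY below signals the caller to rerun the whole config
+  * computation with the constrained bpp.
+  */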
+ if (needs_recompute)
+ return RETRY;
+
+ return setup_ok ? 0 : -EINVAL;
+}
+
+static void hsw_compute_ips_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ pipe_config->ips_enabled = i915_enable_ips &&
+ hsw_crtc_supports_ips(crtc) &&
+ pipe_config->pipe_bpp == 24;
+}
+
+static int intel_crtc_compute_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ /* FDI link clock is fixed at 2.7G */
+ if (pipe_config->requested_mode.clock * 3
+ > IRONLAKE_FDI_FREQ * 4)
+ return -EINVAL;
+ }
+
+ /* All interlaced capable intel hw wants timings in frames. Note though
+ * that intel_lvds_mode_fixup does some funny tricks with the crtc
+ * timings, so we need to be careful not to clobber these. */
+ if (!pipe_config->timings_set)
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ /* Cantiga+ cannot handle modes with a hsync front porch of 0.
+ * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+ */
+ if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+ adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+ return -EINVAL;
+
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
+ pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
+ } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
+ /* only a 8bpc pipe, with 6bpc dither through the panel fitter
+ * for lvds. */
+ pipe_config->pipe_bpp = 8*3;
+ }
+
+ if (HAS_IPS(dev))
+ hsw_compute_ips_config(crtc, pipe_config);
+
+ /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
+ * clock survives for now. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ pipe_config->shared_dpll = crtc->config.shared_dpll;
+
+ if (pipe_config->has_pch_encoder)
+ return ironlake_fdi_compute_config(crtc, pipe_config);
+
+ return 0;
+}
+
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000; /* FIXME */
+}
+
+/* LINTED */
+static int i945_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000;
+}
+
+/* LINTED */
+static int i915_get_display_clock_speed(struct drm_device *dev)
+{
+ return 333000;
+}
+
+/* LINTED */
+static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+{
+ return 200000;
+}
+
+static int i915gm_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 gcfgc = 0;
+
+ (void) pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+ return 133000;
+ else {
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_MHZ:
+ return 333000;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ return 190000;
+ }
+ }
+}
+
+/* LINTED */
+static int i865_get_display_clock_speed(struct drm_device *dev)
+{
+ return 266000;
+}
+
+/* LINTED */
+static int i855_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 hpllcc = 0;
+ /* Assume that the hardware is in the high speed state. This
+ * should be the default.
+ */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_100_200:
+ return 200000;
+ case GC_CLOCK_166_250:
+ return 250000;
+ case GC_CLOCK_100_133:
+ return 133000;
+ }
+
+ /* Shouldn't happen */
+ return 0;
+}
+
+/* LINTED */
+static int i830_get_display_clock_speed(struct drm_device *dev)
+{
+ return 133000;
+}
+
+static void
+intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
+{
+ while (*num > DATA_LINK_M_N_MASK ||
+ *den > DATA_LINK_M_N_MASK) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
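+
+/* Illustrative example (not from the original source): with
+ * DATA_LINK_M_N_MASK == 0xffffff, a pair such as m = 0x1800000 and
+ * n = 0x2000000 is shifted right twice, giving m = 0x600000 and
+ * n = 0x800000; the ratio is preserved while both values fit in
+ * 24 bits.
+ */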
+
+static unsigned int
+roundup_pow_of_two(unsigned int n)
+{
+ unsigned int ret_val = 0;
+ unsigned int temp = n;
+
+ if (n == 0)
+ return 0;
+
+ /* ret_val ends up as floor(log2(n)) */
+ while (temp != 1) {
+ ret_val++;
+ temp = temp >> 1;
+ }
+
+ /* Round up when n is not already a power of two. */
+ if ((1 << ret_val) != n)
+ ret_val++;
+
+ return (1 << ret_val);
+}
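+
+/* Illustrative example (not from the original source): for n = 5 the
+ * loop leaves ret_val = 2 (floor(log2(5))); since (1 << 2) != 5, the
+ * result is rounded up to 1 << 3 = 8. An exact power of two such as
+ * n = 8 is returned unchanged.
+ */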
+
+static void compute_m_n(unsigned int m, unsigned int n,
+ uint32_t *ret_m, uint32_t *ret_n)
+{
+ *ret_n = min(roundup_pow_of_two(n), DATA_LINK_N_MAX);
+ *ret_m = div_u64((uint64_t) m * *ret_n, n);
+ intel_reduce_m_n_ratio(ret_m, ret_n);
+}
+
+void
+intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n)
+{
+ m_n->tu = 64;
+ compute_m_n(bits_per_pixel * pixel_clock,
+ link_clock * nlanes * 8,
+ &m_n->gmch_m, &m_n->gmch_n);
+
+ compute_m_n(pixel_clock, link_clock,
+ &m_n->link_m, &m_n->link_n);
+}
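+
+/* Worked example (illustrative, assuming DATA_LINK_N_MAX == 0x800000 as
+ * in the Linux register headers): a 148,500 kHz mode at 24 bpp over
+ * 4 lanes of a 270,000 kHz link has a data ratio of
+ * 24 * 148500 / (270000 * 4 * 8) = 0.4125, so gmch_n is clamped to
+ * 0x800000 (8,388,608) and gmch_m comes out as 3,460,300.
+ */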
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+ if (i915_panel_use_ssc >= 0)
+ return i915_panel_use_ssc != 0;
+ return dev_priv->vbt.lvds_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+/* LINTED */
+static int vlv_get_refclk(struct drm_crtc *crtc)
+{
+#if 0
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk = 27000; /* for DP & HDMI */
+#endif
+ return 100000; /* only one validated so far */
+/*
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv))
+ refclk = 100000;
+ else
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ refclk = 100000;
+ }
+ return refclk;
+*/
+}
+
+static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk;
+
+ if (IS_VALLEYVIEW(dev)) {
+ refclk = vlv_get_refclk(crtc);
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
+ refclk = 96000;
+ } else {
+ refclk = 48000;
+ }
+
+ return refclk;
+}
+
+static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
+{
+ return (1 << dpll->n) << 16 | dpll->m2;
+}
+
+static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
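+
+/* Illustrative example (not from the original source): for dividers
+ * n = 2, m1 = 10, m2 = 8 the i9xx encoding above packs the FP register
+ * as (2 << 16) | (10 << 8) | 8 = 0x00020a08.
+ */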
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev)) {
+ fp = pnv_dpll_compute_fp(&crtc->config.dpll);
+ if (reduced_clock)
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
+ if (reduced_clock)
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ I915_WRITE(FP0(pipe), fp);
+
+ crtc->lowfreq_avail = false;
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ reduced_clock && i915_powersave) {
+ I915_WRITE(FP1(pipe), fp2);
+ crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(FP1(pipe), fp);
+ }
+}
+
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
+{
+ u32 reg_val;
+
+ /*
+ * PLLB opamp always calibrates to max value of 0x3f, force enable it
+ * and set it to a reasonable value instead.
+ */
+ reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+ reg_val &= 0xffffff00;
+ reg_val |= 0x00000030;
+ vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+ reg_val &= 0x8cffffff;
+ reg_val = 0x8c000000;
+ vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+ reg_val &= 0xffffff00;
+ vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+ reg_val &= 0x00ffffff;
+ reg_val |= 0xb0000000;
+ vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+}
+
+static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+
+ I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+ I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+ I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+}
+
+static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = crtc->pipe;
+ enum transcoder transcoder = crtc->config.cpu_transcoder;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
+ I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
+ I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+ } else {
+ I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+ I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+ I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
+ I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+ }
+}
+
+static void intel_dp_set_m_n(struct intel_crtc *crtc)
+{
+ if (crtc->config.has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+ else
+ intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+}
+
+static void vlv_update_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ int pipe = crtc->pipe;
+ u32 dpll, mdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 coreclk, reg_val, dpll_md;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ bestn = crtc->config.dpll.n;
+ bestm1 = crtc->config.dpll.m1;
+ bestm2 = crtc->config.dpll.m2;
+ bestp1 = crtc->config.dpll.p1;
+ bestp2 = crtc->config.dpll.p2;
+
+ /* See eDP HDMI DPIO driver vbios notes doc */
+
+ /* PLL B needs special handling */
+ if (pipe)
+ vlv_pllb_recal_opamp(dev_priv);
+
+ /* Set up Tx target for periodic Rcomp update */
+ vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
+
+ /* Disable target IRef on PLL */
+ reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
+ reg_val &= 0x00ffffff;
+ vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
+
+ /* Disable fast lock */
+ vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
+
+ /* Set idtafcrecal before PLL is enabled */
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= ((bestn << DPIO_N_SHIFT));
+ mdiv |= (1 << DPIO_K_SHIFT);
+
+ /*
+ * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+ * but we don't support that).
+ * Note: don't use the DAC post divider as it seems unstable.
+ */
+ mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
+ /* Set HBR and RBR LPF coefficients */
+ if (crtc->config.port_clock == 162000 ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
+ vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+ 0x005f0021);
+ else
+ vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+ 0x00d0000f);
+
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
+ /* Use SSC source */
+ if (!pipe)
+ vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ 0x0df40000);
+ else
+ vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ 0x0df70000);
+ } else { /* HDMI or VGA */
+ /* Use bend source */
+ if (!pipe)
+ vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ 0x0df70000);
+ else
+ vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+ 0x0df40000);
+ }
+
+ coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
+ coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
+ coreclk |= 0x01000000;
+ vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
+
+ vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
+
+ /* Enable DPIO clock input */
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ if (pipe)
+ dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", pipe);
+
+ dpll_md = (crtc->config.pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ I915_WRITE(DPLL_MD(pipe), dpll_md);
+ POSTING_READ(DPLL_MD(pipe));
+
+ if (crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(crtc);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void i9xx_update_pll(struct intel_crtc *crtc,
+ intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ int pipe = crtc->pipe;
+ u32 dpll;
+ bool is_sdvo;
+ struct dpll *clock = &crtc->config.dpll;
+
+ i9xx_update_pll_dividers(crtc, reduced_clock);
+
+ is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ dpll |= (crtc->config.pixel_multiplier - 1)
+ << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+ if (is_sdvo)
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (crtc->config.sdvo_tv_clock)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
+
+ if (crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(crtc);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 dpll_md = (crtc->config.pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ I915_WRITE(DPLL_MD(pipe), dpll_md);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+ }
+}
+
+static void i8xx_update_pll(struct intel_crtc *crtc,
+ intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ int pipe = crtc->pipe;
+ u32 dpll;
+ struct dpll *clock = &crtc->config.dpll;
+
+ i9xx_update_pll_dividers(crtc, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+}
+
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
+
+ /* We need to be careful not to change the adjusted mode, for otherwise
+ * the hw state checker will get angry at the mismatch. */
+ crtc_vtotal = adjusted_mode->crtc_vtotal;
+ crtc_vblank_end = adjusted_mode->crtc_vblank_end;
+
+ if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ crtc_vtotal -= 1;
+ crtc_vblank_end -= 1;
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal / 2;
+ } else {
+ vsyncshift = 0;
+ }
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+ I915_WRITE(HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
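+ /* Worked example (illustrative): a 1920x1080 CEA mode with
+ * htotal = 2200 programs HTOTAL as (1920 - 1) | ((2200 - 1) << 16)
+ * = 0x0897077f; the timing registers hold active/total minus one.
+ */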
+ I915_WRITE(HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ I915_WRITE(VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) |
+ ((crtc_vtotal - 1) << 16));
+ I915_WRITE(VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) |
+ ((crtc_vblank_end - 1) << 16));
+ I915_WRITE(VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+ * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+ * documented on the DDI_FUNC_CTL register description, EDP Input Select
+ * bits. */
+ if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+ (pipe == PIPE_B || pipe == PIPE_C))
+ I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+ /* pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(PIPESRC(pipe),
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
+
+static void intel_get_pipe_timings(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ uint32_t tmp;
+
+ tmp = I915_READ(HTOTAL(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = I915_READ(HBLANK(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+ tmp = I915_READ(HSYNC(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+ tmp = I915_READ(VTOTAL(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ tmp = I915_READ(VBLANK(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+ tmp = I915_READ(VSYNC(cpu_transcoder));
+ pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+ if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->adjusted_mode.crtc_vtotal += 1;
+ pipe_config->adjusted_mode.crtc_vblank_end += 1;
+ }
+
+ tmp = I915_READ(PIPESRC(crtc->pipe));
+ pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
+}
+
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t pipeconf;
+
+ pipeconf = 0;
+
+ if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
+ * core speed.
+ *
+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+ * pipe == 0 check?
+ */
+ if (intel_crtc->config.requested_mode.clock >
+ dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
+ }
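+
+ /* Illustrative example (not from the original source): with a
+ * 200,000 kHz core clock reported by get_display_clock_speed(),
+ * the 9/10 test above turns on double-wide mode for any dot
+ * clock above 180,000 kHz.
+ */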
+
+ /* only g4x and later have fancy bpc/dither controls */
+ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+ /* Bspec claims that we can't use dithering for 30bpp pipes. */
+ if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
+ pipeconf |= PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ pipeconf |= PIPECONF_6BPC;
+ break;
+ case 24:
+ pipeconf |= PIPECONF_8BPC;
+ break;
+ case 30:
+ pipeconf |= PIPECONF_10BPC;
+ break;
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
+ }
+ }
+
+ if (HAS_PIPE_CXSR(dev)) {
+ if (intel_crtc->lowfreq_avail) {
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ } else {
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ }
+ }
+
+ if (!IS_GEN2(dev) &&
+ intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ else
+ pipeconf |= PIPECONF_PROGRESSIVE;
+
+ if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
+ pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+
+ I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
+ POSTING_READ(PIPECONF(intel_crtc->pipe));
+}
+
+static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int refclk, num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dspcntr;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false;
+ struct intel_encoder *encoder;
+ const intel_limit_t *limit;
+ int ret;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ refclk = i9xx_get_refclk(crtc, num_connectors);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ok = dev_priv->display.find_dpll(limit, crtc,
+ intel_crtc->config.port_clock,
+ refclk, NULL, &clock);
+ if (!ok && !intel_crtc->config.clock_set) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ has_reduced_clock =
+ dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk, &clock,
+ &reduced_clock);
+ }
+ /* Compat-code for transition, will disappear. */
+ if (!intel_crtc->config.clock_set) {
+ intel_crtc->config.dpll.n = clock.n;
+ intel_crtc->config.dpll.m1 = clock.m1;
+ intel_crtc->config.dpll.m2 = clock.m2;
+ intel_crtc->config.dpll.p1 = clock.p1;
+ intel_crtc->config.dpll.p2 = clock.p2;
+ }
+
+ if (IS_GEN2(dev))
+ i8xx_update_pll(intel_crtc,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_update_pll(intel_crtc);
+ else
+ i9xx_update_pll(intel_crtc,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (!IS_VALLEYVIEW(dev)) {
+ if (pipe == 0)
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ }
+
+ intel_set_pipe_timings(intel_crtc);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((mode->vdisplay - 1) << 16) |
+ (mode->hdisplay - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+
+ i9xx_set_pipeconf(intel_crtc);
+
+ I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ return ret;
+}
+
+static void i9xx_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PFIT_CONTROL);
+ if (!(tmp & PFIT_ENABLE))
+ return;
+
+ /* Check whether the pfit is attached to our pipe. */
+ if (INTEL_INFO(dev)->gen < 4) {
+ if (crtc->pipe != PIPE_B)
+ return;
+ } else {
+ if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
+ return;
+ }
+
+ pipe_config->gmch_pfit.control = tmp;
+ pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+ if (INTEL_INFO(dev)->gen < 5)
+ pipe_config->gmch_pfit.lvds_border_bits =
+ I915_READ(LVDS) & LVDS_BORDER_ENABLE;
+}
+
+static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ pipe_config->cpu_transcoder = (enum transcoder)crtc->pipe;
+ pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+ tmp = I915_READ(PIPECONF(crtc->pipe));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ intel_get_pipe_timings(crtc, pipe_config);
+
+ i9xx_get_pfit_config(crtc, pipe_config);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ tmp = I915_READ(DPLL_MD(crtc->pipe));
+ pipe_config->pixel_multiplier =
+ ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
+ >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ tmp = I915_READ(DPLL(crtc->pipe));
+ pipe_config->pixel_multiplier =
+ ((tmp & SDVO_MULTIPLIER_MASK)
+ >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
+ } else {
+ /* Note that on i915G/GM the pixel multiplier is in the sdvo
+ * port and will be fixed up in the encoder->get_config
+ * function. */
+ pipe_config->pixel_multiplier = 1;
+ }
+
+ return true;
+}
+
+static void ironlake_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ u32 val, final;
+ bool has_lvds = false;
+ bool has_cpu_edp = false;
+ bool has_panel = false;
+ bool has_ck505 = false;
+ bool can_ssc = false;
+
+ /* We need to take the global config into account */
+ list_for_each_entry(encoder, struct intel_encoder, &mode_config->encoder_list,
+ base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_panel = true;
+ has_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ has_panel = true;
+ if (enc_to_dig_port(&encoder->base)->port == PORT_A)
+ has_cpu_edp = true;
+ break;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev)) {
+ has_ck505 = dev_priv->vbt.display_clock_mode;
+ can_ssc = has_ck505;
+ } else {
+ has_ck505 = false;
+ can_ssc = true;
+ }
+
+ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
+ has_panel, has_lvds, has_ck505);
+
+ /* Ironlake: try to set up the display ref clock before DPLL
+ * enabling. This is only under the driver's control after the
+ * PCH B stepping; earlier chipset steppings should ignore
+ * this setting.
+ */
+ val = I915_READ(PCH_DREF_CONTROL);
+
+ /* As we must carefully and slowly disable/enable each source in turn,
+ * compute the final state we want first and check if we need to
+ * make any changes at all.
+ */
+ final = val;
+ final &= ~DREF_NONSPREAD_SOURCE_MASK;
+ if (has_ck505)
+ final |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ final |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ final &= ~DREF_SSC_SOURCE_MASK;
+ final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ final &= ~DREF_SSC1_ENABLE;
+
+ if (has_panel) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_SSC1_ENABLE;
+
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else
+ final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ } else {
+ final |= DREF_SSC_SOURCE_DISABLE;
+ final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ }
+
+ if (final == val)
+ return;
+
+ /* Always enable nonspread source */
+ val &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+ if (has_ck505)
+ val |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ val |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ if (has_panel) {
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_ENABLE;
+
+ /* SSC must be turned on before enabling the CPU output */
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on panel\n");
+ val |= DREF_SSC1_ENABLE;
+ } else
+ val &= ~DREF_SSC1_ENABLE;
+
+ /* Get SSC going before enabling the outputs */
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ DRM_DEBUG_KMS("Using SSC on eDP\n");
+ val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ }
+ else
+ val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ } else {
+ DRM_DEBUG_KMS("Disabling SSC entirely\n");
+
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Turn off CPU output */
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
+ /* Turn off the SSC source */
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
+
+ /* Turn off SSC1 */
+ val &= ~DREF_SSC1_ENABLE;
+
+ I915_WRITE(PCH_DREF_CONTROL, val);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
+
+ BUG_ON(val != final);
+}
+
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+ bool is_sdv = false;
+ u32 tmp;
+
+ list_for_each_entry(encoder, struct intel_encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
+ }
+
+ if (!has_vga)
+ return;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* XXX: Rip out SDV support once Haswell ships for real. */
+ if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+ is_sdv = true;
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ udelay(24);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (!is_sdv) {
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 1))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_atomic((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+ 1))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFFUL << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+ tmp |= 0x7FFF;
+ intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ if (is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+ tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+ intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+ tmp |= (0x3F << 8);
+ intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+ }
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ if (!is_sdv) {
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xFUL << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xFUL << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ }
+
+ /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+ tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+ tmp |= SBI_DBUFF0_ENABLE;
+ intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ironlake_init_pch_refclk(dev);
+ else if (HAS_PCH_LPT(dev))
+ lpt_init_pch_refclk(dev);
+}
+
+static int ironlake_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ int num_connectors = 0;
+ bool is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ }
+ num_connectors++;
+ }
+
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
+ return dev_priv->vbt.lvds_ssc_freq * 1000;
+ }
+
+ return 120000;
+}
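+
+/* Illustrative example (not from the original source): with a single
+ * LVDS connector using SSC and a VBT SSC frequency of 100 MHz this
+ * returns 100000 kHz; in every other case the fixed 120 MHz PCH
+ * reference (120000 kHz) is used.
+ */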
+
+static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t val;
+
+ val = 0;
+
+ switch (intel_crtc->config.pipe_bpp) {
+ case 18:
+ val |= PIPECONF_6BPC;
+ break;
+ case 24:
+ val |= PIPECONF_8BPC;
+ break;
+ case 30:
+ val |= PIPECONF_10BPC;
+ break;
+ case 36:
+ val |= PIPECONF_12BPC;
+ break;
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ BUG();
+ }
+
+ if (intel_crtc->config.dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ if (intel_crtc->config.limited_color_range)
+ val |= PIPECONF_COLOR_RANGE_SELECT;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
+
+/*
+ * Set up the pipe CSC unit.
+ *
+ * Currently only full range RGB to limited range RGB conversion
+ * is supported, but eventually this should handle various
+ * RGB<->YCbCr scenarios as well.
+ */
+static void intel_set_pipe_csc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint16_t coeff = 0x7800; /* 1.0 */
+
+ /*
+ * TODO: Check what kind of values actually come out of the pipe
+ * with these coeff/postoff values and adjust to get the best
+ * accuracy. Perhaps we even need to take the bpc value into
+ * consideration.
+ */
+
+ if (intel_crtc->config.limited_color_range)
+ coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
+
+ /*
+ * GY/GU and RY/RU should be the other way around according
+ * to BSpec, but reality doesn't agree. Just set them up in
+ * a way that results in the correct picture.
+ */
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
+
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+ if (INTEL_INFO(dev)->gen > 6) {
+ uint16_t postoff = 0;
+
+ if (intel_crtc->config.limited_color_range)
+ postoff = (16 * (1 << 13) / 255) & 0x1fff;
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ } else {
+ uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+ if (intel_crtc->config.limited_color_range)
+ mode |= CSC_BLACK_SCREEN_OFFSET;
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ }
+}
+
+static void haswell_set_pipeconf(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ uint32_t val;
+
+ val = 0;
+
+ if (intel_crtc->config.dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(cpu_transcoder), val);
+ POSTING_READ(PIPECONF(cpu_transcoder));
+
+ I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+ POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
+ const intel_limit_t *limit;
+ bool ret, is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ }
+ }
+
+ refclk = ironlake_get_refclk(crtc);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ret = dev_priv->display.find_dpll(limit, crtc,
+ to_intel_crtc(crtc)->config.port_clock,
+ refclk, NULL, clock);
+ if (!ret)
+ return false;
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ *has_reduced_clock =
+ dev_priv->display.find_dpll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk, clock,
+ reduced_clock);
+ }
+
+ return true;
+}
+
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t temp;
+
+ temp = I915_READ(SOUTH_CHICKEN1);
+ if (temp & FDI_BC_BIFURCATION_SELECT)
+ return;
+
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+ WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+ temp |= FDI_BC_BIFURCATION_SELECT;
+ DRM_DEBUG_KMS("enabling fdi C rx\n");
+ I915_WRITE(SOUTH_CHICKEN1, temp);
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ switch (intel_crtc->pipe) {
+ case PIPE_A:
+ break;
+ case PIPE_B:
+ if (intel_crtc->config.fdi_lanes > 2) {
+ WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ DRM_DEBUG_KMS("bc_bifurcation select 0x%x", I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+ } else
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ break;
+ case PIPE_C:
+ cpt_enable_fdi_bc_bifurcation(dev);
+
+ break;
+ default:
+ BUG();
+ }
+}
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ return bps / (link_bw * 8) + 1;
+}
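+
+/* Worked example (illustrative, not from the original source): a
+ * 193,250 kHz mode at 24 bpp on a 270,000 kHz FDI link needs
+ * 193250 * 24 * 21 / 20 = 4,869,900 kbps; dividing by one lane's
+ * 270000 * 8 = 2,160,000 truncates to 2, so three lanes are requested.
+ */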
+
+static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
+{
+ return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+}
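+
+/* Illustrative example (assuming i9xx_dpll_compute_m() evaluates
+ * 5 * (m1 + 2) + (m2 + 2), as in the Linux source this is imported
+ * from): for m1 = 12, m2 = 9 the effective M is 81, so with the
+ * default factor of 21 and n = 3 (21 * 3 = 63) no CB tuning is
+ * needed, while n = 4 (21 * 4 = 84 > 81) would set FP_CB_TUNE.
+ */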
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ u32 *fp,
+ intel_clock_t *reduced_clock, u32 *fp2)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ uint32_t dpll;
+ int factor, num_connectors = 0;
+ bool is_lvds = false, is_sdvo = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (is_lvds) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->vbt.lvds_ssc_freq == 100) ||
+ (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+ factor = 25;
+ } else if (intel_crtc->config.sdvo_tv_clock)
+ factor = 20;
+
+ if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
+ *fp |= FP_CB_TUNE;
+
+ if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
+ *fp2 |= FP_CB_TUNE;
+
+ dpll = 0;
+
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ dpll |= (intel_crtc->config.pixel_multiplier - 1)
+ << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+ if (is_sdvo)
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ if (intel_crtc->config.has_dp_encoder)
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (intel_crtc->config.dpll.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+
+ if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ return dpll | DPLL_VCO_ENABLE;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ int num_connectors = 0;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll = 0, fp = 0, fp2 = 0;
+ bool ok, has_reduced_clock = false;
+ bool is_lvds = false;
+ struct intel_encoder *encoder;
+ struct intel_shared_dpll *pll;
+ int ret;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ }
+
+ num_connectors++;
+ }
+
+ if (!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)))
+ DRM_ERROR("Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+ ok = ironlake_compute_clocks(crtc, &clock,
+ &has_reduced_clock, &reduced_clock);
+ if (!ok && !intel_crtc->config.clock_set) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+ /* Compat-code for transition, will disappear. */
+ if (!intel_crtc->config.clock_set) {
+ intel_crtc->config.dpll.n = clock.n;
+ intel_crtc->config.dpll.m1 = clock.m1;
+ intel_crtc->config.dpll.m2 = clock.m2;
+ intel_crtc->config.dpll.p1 = clock.p1;
+ intel_crtc->config.dpll.p2 = clock.p2;
+ }
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (intel_crtc->config.has_pch_encoder) {
+ fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
+ if (has_reduced_clock)
+ fp2 = i9xx_dpll_compute_fp(&reduced_clock);
+
+ dpll = ironlake_compute_dpll(intel_crtc,
+ &fp, &reduced_clock,
+ has_reduced_clock ? &fp2 : NULL);
+
+ intel_crtc->config.dpll_hw_state.dpll = dpll;
+ intel_crtc->config.dpll_hw_state.fp0 = fp;
+ if (has_reduced_clock)
+ intel_crtc->config.dpll_hw_state.fp1 = fp2;
+ else
+ intel_crtc->config.dpll_hw_state.fp1 = fp;
+
+ pll = intel_get_shared_dpll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(pipe));
+ return -EINVAL;
+ }
+ } else
+ intel_put_shared_dpll(intel_crtc);
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
+
+ if (is_lvds && has_reduced_clock && i915_powersave)
+ intel_crtc->lowfreq_avail = true;
+ else
+ intel_crtc->lowfreq_avail = false;
+
+ if (intel_crtc->config.has_pch_encoder) {
+ pll = intel_crtc_to_shared_dpll(intel_crtc);
+ /*
+ * We previously verified the shared_dpll in the eDP case,
+ * so pll should not be NULL from the call above.
+ */
+ BUG_ON(pll == NULL);
+ /* defensive check in case BUG_ON is compiled out in this port */
+ if (pll == NULL)
+ return -EINVAL;
+
+ I915_WRITE(PCH_DPLL(pll->id), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(PCH_DPLL(pll->id), dpll);
+
+ if (has_reduced_clock)
+ I915_WRITE(PCH_FP1(pll->id), fp2);
+ else
+ I915_WRITE(PCH_FP1(pll->id), fp);
+ }
+
+ intel_set_pipe_timings(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(intel_crtc,
+ &intel_crtc->config.fdi_m_n);
+ }
+
+ if (IS_IVYBRIDGE(dev))
+ ivybridge_update_fdi_bc_bifurcation(intel_crtc);
+
+ ironlake_set_pipeconf(crtc);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ return ret;
+}
+
+static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder transcoder = pipe_config->cpu_transcoder;
+
+ pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
+ pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
+ pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+ & ~TU_SIZE_MASK;
+ pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+ pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void ironlake_get_pfit_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(PF_CTL(crtc->pipe));
+
+ if (tmp & PF_ENABLE) {
+ pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
+ pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+
+ /* We currently do not free assignments of panel fitters on
+ * ivb/hsw (since we don't use the higher upscaling modes which
+ * differentiate them) so just WARN about this case for now. */
+ if (IS_GEN7(dev)) {
+ WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
+ PF_PIPE_SEL_IVB(crtc->pipe));
+ DRM_DEBUG("PF_CTL(crtc->pipe) 0x%x", tmp);
+ }
+ }
+}
+
+static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ pipe_config->cpu_transcoder = (enum transcoder)crtc->pipe;
+ pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+ tmp = I915_READ(PIPECONF(crtc->pipe));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+ /* LINTED */
+ struct intel_shared_dpll *pll;
+
+ pipe_config->has_pch_encoder = true;
+
+ tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+ pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ironlake_get_fdi_m_n_config(crtc, pipe_config);
+
+ /* XXX: Can't properly read out the pch dpll pixel multiplier
+ * since we don't have state tracking for pch clocks yet. */
+ pipe_config->pixel_multiplier = 1;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ pipe_config->shared_dpll = (enum intel_dpll_id)crtc->pipe;
+ } else {
+ tmp = I915_READ(PCH_DPLL_SEL);
+ if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
+ pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
+ else
+ pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
+ }
+
+ pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+ WARN_ON(!pll->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
+ } else {
+ pipe_config->pixel_multiplier = 1;
+ }
+
+ intel_get_pipe_timings(crtc, pipe_config);
+
+ ironlake_get_pfit_config(crtc, pipe_config);
+
+ return true;
+}
+
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+ bool enable = false;
+ struct intel_crtc *crtc;
+
+ list_for_each_entry(crtc, struct intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ if (!crtc->base.enabled)
+ continue;
+
+ if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+ crtc->config.cpu_transcoder != TRANSCODER_EDP)
+ enable = true;
+ }
+
+ intel_set_power_well(dev, enable);
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane;
+ int ret;
+
+ if (!intel_ddi_pll_mode_set(crtc))
+ return -EINVAL;
+
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc, true);
+
+ if (intel_crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(intel_crtc);
+
+ intel_crtc->lowfreq_avail = false;
+
+ intel_set_pipe_timings(intel_crtc);
+
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(intel_crtc,
+ &intel_crtc->config.fdi_m_n);
+ }
+
+ haswell_set_pipeconf(crtc);
+
+ intel_set_pipe_csc(crtc);
+
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
+ POSTING_READ(DSPCNTR(plane));
+
+ ret = intel_pipe_set_base(crtc, x, y, fb);
+
+ intel_update_watermarks(dev);
+
+ return ret;
+}
+
+static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain pfit_domain;
+ uint32_t tmp;
+
+ pipe_config->cpu_transcoder = (enum transcoder)crtc->pipe;
+ pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ enum pipe trans_edp_pipe;
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ DRM_ERROR("unknown pipe linked to edp transcoder\n");
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ trans_edp_pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ trans_edp_pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ trans_edp_pipe = PIPE_C;
+ break;
+ }
+
+ if (trans_edp_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ }
+
+ if (!intel_display_power_enabled(dev,
+ POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
+ return false;
+
+ tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+ if (!(tmp & PIPECONF_ENABLE))
+ return false;
+
+ /*
+ * Haswell has only FDI/PCH transcoder A, which is the one connected to
+ * DDI E. So just check whether this pipe is wired to DDI E and whether
+ * the PCH transcoder is on.
+ */
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+ if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
+ I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ pipe_config->has_pch_encoder = true;
+
+ tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ }
+
+ intel_get_pipe_timings(crtc, pipe_config);
+
+ pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+ if (intel_display_power_enabled(dev, pfit_domain))
+ ironlake_get_pfit_config(crtc, pipe_config);
+
+ pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
+ (I915_READ(IPS_CTL) & IPS_ENABLE);
+
+ pipe_config->pixel_multiplier = 1;
+
+ return true;
+}
+
+static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ int x, int y,
+ struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ int pipe = intel_crtc->pipe;
+ int ret;
+
+ drm_vblank_pre_modeset(dev, pipe);
+
+ ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
+
+ drm_vblank_post_modeset(dev, pipe);
+
+ if (ret != 0)
+ return ret;
+
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ mode->base.id, mode->name);
+ if (encoder->mode_set) {
+ encoder->mode_set(encoder);
+ } else {
+ encoder_funcs = encoder->base.helper_private;
+ encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+ }
+ }
+
+ return 0;
+}
+
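+/*
+ * Returns true when the ELD already programmed into the hardware matches
+ * the connector's cached ELD (or both are empty), so the caller can skip
+ * rewriting it.
+ */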
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t i;
+
+ i = I915_READ(reg_eldv);
+ i &= bits_eldv;
+
+ if (!eld[0])
+ return !i;
+
+ if (!i)
+ return false;
+
+ i = I915_READ(reg_elda);
+ i &= ~bits_elda;
+ I915_WRITE(reg_elda, i);
+
+ for (i = 0; i < eld[2]; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)(uintptr_t)eld + i))
+ return false;
+
+ return true;
+}
+
+static void g4x_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t len;
+ uint32_t i;
+
+ i = I915_READ(G4X_AUD_VID_DID);
+
+ if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i &= ~(eldv | G4X_ELD_ADDR);
+ len = (i >> 9) & 0x1f; /* ELD buffer size */
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+
+ if (!eld[0])
+ return;
+
+ len = min(eld[2], len);
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)(uintptr_t)eld + i));
+
+ i = I915_READ(G4X_AUD_CNTL_ST);
+ i |= eldv;
+ I915_WRITE(G4X_AUD_CNTL_ST, i);
+}
+
+static void haswell_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ int tmp;
+
+ int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+ int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+ int aud_config = HSW_AUD_CFG(pipe);
+ int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
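+	/*
+	 * The audio enable and ELD-valid bits for all pipes share one
+	 * register, four bits per pipe, hence the (pipe * 4) shifts below.
+	 */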
+ DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+ /* Audio output enable */
+ DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+ tmp = I915_READ(aud_cntrl_st2);
+ tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+
+ /* Wait for 1 vertical blank */
+ intel_wait_for_vblank(dev, pipe);
+
+ /* Set ELD valid state */
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+ tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+ I915_WRITE(aud_cntrl_st2, tmp);
+ tmp = I915_READ(aud_cntrl_st2);
+ DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+ /* Enable HDMI mode */
+ tmp = I915_READ(aud_config);
+ DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+	/* clear N_programming_enable and N_value_index */
+ tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+ I915_WRITE(aud_config, tmp);
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+ intel_crtc->eld_vld = true;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+ len = min(eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)(uintptr_t)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
+
+static void ironlake_write_eld(struct drm_connector *connector,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t eldv;
+ uint32_t i;
+ int len;
+ int hdmiw_hdmiedid;
+ int aud_config;
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+ int pipe = to_intel_crtc(crtc)->pipe;
+
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else {
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+ i = I915_READ(aud_cntl_st);
+ i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
+ if (!i) {
+ DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
+ /* operate blindly on all ports */
+ eldv = IBX_ELD_VALIDB;
+ eldv |= IBX_ELD_VALIDB << 4;
+ eldv |= IBX_ELD_VALIDB << 8;
+ } else {
+ DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
+ eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
+ i = I915_READ(aud_cntrl_st2);
+ i &= ~eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+
+ if (!eld[0])
+ return;
+
+ i = I915_READ(aud_cntl_st);
+ i &= ~IBX_ELD_ADDRESS;
+ I915_WRITE(aud_cntl_st, i);
+
+ len = min(eld[2], 21); /* 84 bytes of hw ELD buffer */
+ DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ for (i = 0; i < len; i++)
+ I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)(uintptr_t)eld + i));
+
+ i = I915_READ(aud_cntrl_st2);
+ i |= eldv;
+ I915_WRITE(aud_cntrl_st2, i);
+}
+
+void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ connector = drm_select_eld(encoder, mode);
+ if (!connector)
+ return;
+
+ DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ connector->encoder->base.id,
+ drm_get_encoder_name(connector->encoder));
+
+ connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+ if (dev_priv->display.write_eld)
+ dev_priv->display.write_eld(connector, crtc);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int palreg = PALETTE(pipe);
+ int i;
+ bool reenable_ips = false;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled || !intel_crtc->active)
+ return;
+
+ if (!HAS_PCH_SPLIT(dev_priv->dev))
+ assert_pll_enabled(dev_priv, pipe);
+
+ /* use legacy palette for Ironlake */
+ if (HAS_PCH_SPLIT(dev))
+ palreg = LGC_PALETTE(pipe);
+
+	/* Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ */
+ if (intel_crtc->config.ips_enabled &&
+ ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_SPLIT)) {
+ hsw_disable_ips(intel_crtc);
+ reenable_ips = true;
+ }
+
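+	/* Each of the 256 palette entries packs 8-bit R/G/B into one 32-bit
+	 * register: red in bits 23:16, green in 15:8, blue in 7:0.
+	 */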
+ for (i = 0; i < 256; i++) {
+ I915_WRITE(palreg + 4 * i,
+ (intel_crtc->lut_r[i] << 16) |
+ (intel_crtc->lut_g[i] << 8) |
+ intel_crtc->lut_b[i]);
+ }
+
+ if (reenable_ips)
+ hsw_enable_ips(intel_crtc);
+}
+
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ bool visible = base != 0;
+ u32 cntl;
+
+ if (intel_crtc->cursor_visible == visible)
+ return;
+
+ cntl = I915_READ(_CURACNTR);
+ if (visible) {
+ /* On these chipsets we can only modify the base whilst
+ * the cursor is disabled.
+ */
+ I915_WRITE(_CURABASE, base);
+
+ cntl &= ~(CURSOR_FORMAT_MASK);
+ /* XXX width must be 64, stride 256 => 0x00 << 28 */
+ cntl |= CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB;
+ } else
+ cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ I915_WRITE(_CURACNTR, cntl);
+
+ intel_crtc->cursor_visible = visible;
+}
+
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(CURCNTR(pipe));
+ if (base) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ I915_WRITE(CURCNTR(pipe), cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE(pipe), base);
+}
+
+static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ bool visible = base != 0;
+
+ if (intel_crtc->cursor_visible != visible) {
+ uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
+ if (base) {
+ cntl &= ~CURSOR_MODE;
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ } else {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ }
+ if (IS_HASWELL(dev))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
+ I915_WRITE(CURCNTR_IVB(pipe), cntl);
+
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(CURBASE_IVB(pipe), base);
+}
+
+/* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+ bool on)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int x = intel_crtc->cursor_x;
+ int y = intel_crtc->cursor_y;
+ u32 base, pos;
+ bool visible;
+
+ pos = 0;
+
+ if (on && crtc->enabled && crtc->fb) {
+ base = intel_crtc->cursor_addr;
+ if (x > (int) crtc->fb->width)
+ base = 0;
+
+ if (y > (int) crtc->fb->height)
+ base = 0;
+ } else
+ base = 0;
+
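+	/*
+	 * CURPOS uses sign-magnitude encoding: a negative coordinate sets
+	 * the sign bit and stores the absolute value. If the cursor is
+	 * entirely off screen, base stays 0 so the cursor gets disabled.
+	 */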
+ if (x < 0) {
+ if (x + intel_crtc->cursor_width < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ if (y + intel_crtc->cursor_height < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ visible = base != 0;
+ if (!visible && !intel_crtc->cursor_visible)
+ return;
+
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ I915_WRITE(CURPOS_IVB(pipe), pos);
+ ivb_update_cursor(crtc, base);
+ } else {
+ I915_WRITE(CURPOS(pipe), pos);
+ if (IS_845G(dev) || IS_I865G(dev))
+ i845_update_cursor(crtc, base);
+ else
+ i9xx_update_cursor(crtc, base);
+ }
+}
+
+static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj;
+ uint32_t addr;
+ int ret;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!handle) {
+ DRM_DEBUG_KMS("cursor off\n");
+ addr = 0;
+ obj = NULL;
+ mutex_lock(&dev->struct_mutex);
+ goto finish;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ DRM_ERROR("we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL)
+ return -ENOENT;
+
+ if (obj->base.size < width * height * 4) {
+ DRM_ERROR("buffer is to small\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* we only need to pin inside GTT if cursor is non-phy */
+ mutex_lock(&dev->struct_mutex);
+ if (!dev_priv->info->cursor_needs_physical) {
+ unsigned alignment;
+
+ if (obj->tiling_mode) {
+ DRM_ERROR("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
+ /* Note that the w/a also requires 2 PTE of padding following
+ * the bo. We currently fill all unused PTE with the shadow
+ * page and so we should always have valid PTE following the
+ * cursor preventing the VT-d warning.
+ */
+ alignment = 0;
+ if (need_vtd_wa(dev))
+ alignment = 64*1024;
+
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_ERROR("failed to release fence for cursor");
+ goto fail_unpin;
+ }
+
+ addr = obj->gtt_offset;
+ obj->is_cursor = 1;
+ } else {
+ int align = IS_I830(dev) ? 16 * 1024 : 256;
+ ret = i915_gem_attach_phys_object(dev, obj,
+ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
+ align);
+ if (ret) {
+ DRM_ERROR("failed to attach phys object\n");
+ goto fail_locked;
+ }
+ addr = obj->phys_obj->handle->paddr;
+ }
+
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, (height << 12) | width);
+
+ finish:
+ if (intel_crtc->cursor_bo) {
+ if (dev_priv->info->cursor_needs_physical) {
+ if (intel_crtc->cursor_bo != obj)
+ i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+ } else
+ i915_gem_object_unpin(intel_crtc->cursor_bo);
+ drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ intel_crtc->cursor_addr = addr;
+ intel_crtc->cursor_bo = obj;
+ intel_crtc->cursor_width = (int16_t)width;
+ intel_crtc->cursor_height = (int16_t)height;
+
+ intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+
+ return 0;
+fail_unpin:
+ i915_gem_object_unpin(obj);
+fail_locked:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return ret;
+}
+
+static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->cursor_x = (int16_t)x;
+ intel_crtc->cursor_y = (int16_t)y;
+
+ intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
+
+ return 0;
+}
+
+/** Sets the color ramps on behalf of RandR */
+void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ intel_crtc->lut_r[regno] = red >> 8;
+ intel_crtc->lut_g[regno] = green >> 8;
+ intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
+}
+
+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+	int end = (start + size > 256) ? 256 : start + size;
+	int i;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ for (i = start; i < end; i++) {
+ intel_crtc->lut_r[i] = red[i] >> 8;
+ intel_crtc->lut_g[i] = green[i] >> 8;
+ intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ intel_crtc_load_lut(crtc);
+}
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static struct drm_display_mode load_detect_mode = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+static struct drm_framebuffer *
+intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
+{
+ struct intel_framebuffer *intel_fb;
+ int ret;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return (NULL);
+ }
+
+ ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(&obj->base);
+ kfree(intel_fb, sizeof(struct intel_framebuffer));
+ return (NULL);
+ }
+
+ return &intel_fb->base;
+}
+
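+/*
+ * Bytes per scanline for the given width and bpp, rounded up to the
+ * 64-byte alignment the display engine requires.
+ */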
+static u32
+intel_framebuffer_pitch_for_width(int width, int bpp)
+{
+ u32 pitch = DIV_ROUND_UP(width * bpp, 8);
+ return ALIGN(pitch, 64);
+}
+
+static u32
+intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+{
+ u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
+ return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+}
+
+static struct drm_framebuffer *
+intel_framebuffer_create_for_mode(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ int depth, int bpp)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_mode_fb_cmd2 mode_cmd;
+
+ obj = i915_gem_alloc_object(dev,
+ intel_framebuffer_size_for_mode(mode, bpp));
+ if (obj == NULL)
+ return (NULL);
+
+ (void) memset(&mode_cmd, 0, sizeof(struct drm_mode_fb_cmd2));
+
+ mode_cmd.width = mode->hdisplay;
+ mode_cmd.height = mode->vdisplay;
+ mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+ bpp);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+ return intel_framebuffer_create(dev, &mode_cmd, obj);
+}
+
+static struct drm_framebuffer *
+mode_fits_in_fbdev(struct drm_device *dev,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct drm_framebuffer *fb;
+
+ if (dev_priv->fbdev == NULL)
+ return NULL;
+
+ obj = dev_priv->fbdev->ifb.obj;
+ if (obj == NULL)
+ return NULL;
+
+ fb = &dev_priv->fbdev->ifb.base;
+ if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+ fb->bits_per_pixel))
+ return NULL;
+
+ if (obj->base.size < mode->vdisplay * fb->pitches[0])
+ return NULL;
+
+ return fb;
+}
+
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old)
+{
+ struct intel_crtc *intel_crtc;
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
+ struct drm_crtc *possible_crtc;
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = NULL;
+ struct drm_device *dev = encoder->dev;
+ struct drm_framebuffer *fb;
+ int i = -1;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ encoder->base.id, drm_get_encoder_name(encoder));
+
+ /*
+ * Algorithm gets a little messy:
+ *
+ * - if the connector already has an assigned crtc, use it (but make
+ * sure it's on first)
+ *
+ * - try to find the first unused crtc that can drive this connector,
+ * and use that if we find one
+ */
+
+ /* See if we already have a CRTC for this connector */
+ if (encoder->crtc) {
+ crtc = encoder->crtc;
+
+ mutex_lock(&crtc->mutex);
+
+ old->dpms_mode = connector->dpms;
+ old->load_detect_temp = false;
+
+ /* Make sure the crtc and connector are running */
+ if (connector->dpms != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
+
+ return true;
+ }
+
+ /* Find an unused one (if possible) */
+ list_for_each_entry(possible_crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ i++;
+ if (!(encoder->possible_crtcs & (1 << i)))
+ continue;
+ if (!possible_crtc->enabled) {
+ crtc = possible_crtc;
+ break;
+ }
+ }
+
+ /*
+ * If we didn't find an unused CRTC, don't use any.
+ */
+ if (!crtc) {
+ DRM_DEBUG_KMS("no pipe available for load-detect\n");
+ return false;
+ }
+
+ mutex_lock(&crtc->mutex);
+ intel_encoder->new_crtc = to_intel_crtc(crtc);
+ to_intel_connector(connector)->new_encoder = intel_encoder;
+
+ intel_crtc = to_intel_crtc(crtc);
+ old->dpms_mode = connector->dpms;
+ old->load_detect_temp = true;
+ old->release_fb = NULL;
+
+ if (!mode)
+ mode = &load_detect_mode;
+
+ /* We need a framebuffer large enough to accommodate all accesses
+ * that the plane may generate whilst we perform load detection.
+ * We can not rely on the fbcon either being present (we get called
+ * during its initialisation to detect all boot displays, or it may
+ * not even exist) or that it is large enough to satisfy the
+ * requested mode.
+ */
+ fb = mode_fits_in_fbdev(dev, mode);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
+ fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+ old->release_fb = fb;
+ } else
+ DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
+	if (fb == NULL) {
+ DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+ mutex_unlock(&crtc->mutex);
+ return false;
+ }
+
+ if (intel_set_mode(crtc, mode, 0, 0, fb)) {
+ DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+ if (old->release_fb)
+ old->release_fb->funcs->destroy(old->release_fb);
+ mutex_unlock(&crtc->mutex);
+ return false;
+ }
+
+ /* let the connector get through one full cycle before testing */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ return true;
+}
+
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old)
+{
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(connector);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_crtc *crtc = encoder->crtc;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ encoder->base.id, drm_get_encoder_name(encoder));
+
+ if (old->load_detect_temp) {
+ to_intel_connector(connector)->new_encoder = NULL;
+ intel_encoder->new_crtc = NULL;
+ intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+ if (old->release_fb) {
+ drm_framebuffer_unregister_private(old->release_fb);
+ drm_framebuffer_unreference(old->release_fb);
+ }
+
+ mutex_unlock(&crtc->mutex);
+ return;
+ }
+
+ /* Switch crtc and encoder back off if necessary */
+ if (old->dpms_mode != DRM_MODE_DPMS_ON)
+ connector->funcs->dpms(connector, old->dpms_mode);
+
+ mutex_unlock(&crtc->mutex);
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll = I915_READ(DPLL(pipe));
+ u32 fp;
+ intel_clock_t clock;
+
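+	/*
+	 * Recover the DPLL divisors (m1/m2, n, p1/p2) from the FP and DPLL
+	 * registers, then feed them to the clock calculator to reconstruct
+	 * the programmed dot clock.
+	 */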
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = I915_READ(FP0(pipe));
+ else
+ fp = I915_READ(FP1(pipe));
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ if (IS_PINEVIEW(dev)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
+ if (!IS_GEN2(dev)) {
+ if (IS_PINEVIEW(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+ case DPLLB_MODE_DAC_SERIAL:
+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+ 5 : 10;
+ break;
+ case DPLLB_MODE_LVDS:
+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+ 7 : 14;
+ break;
+ default:
+ DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ return 0;
+ }
+
+ if (IS_PINEVIEW(dev))
+ pineview_clock(96000, &clock);
+ else
+ i9xx_clock(96000, &clock);
+ } else {
+ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+
+ if (is_lvds) {
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ i9xx_clock(66000, &clock);
+ } else
+ i9xx_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ i9xx_clock(48000, &clock);
+ }
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ struct drm_display_mode *mode;
+ int htot = I915_READ(HTOTAL(cpu_transcoder));
+ int hsync = I915_READ(HSYNC(cpu_transcoder));
+ int vtot = I915_READ(VTOTAL(cpu_transcoder));
+ int vsync = I915_READ(VSYNC(cpu_transcoder));
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
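+	/*
+	 * Each timing register packs two values minus one: the active/start
+	 * count in the low 16 bits and the total/end count in the high 16.
+	 */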
+ mode->clock = intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
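+/*
+ * LVDS downclocking: when a lower-power alternate divisor (FPA1) is
+ * available, switch the DPLL back to full rate while the framebuffer is
+ * busy; intel_decrease_pllclock() drops it again once the display idles.
+ */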
+void intel_increase_pllclock(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ dpll = I915_READ(dpll_reg);
+ if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ if (dpll & DISPLAY_RATE_SELECT_FPA1)
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+ }
+}
+
+static void intel_decrease_pllclock(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ /*
+ * Since this is called by a timer, we should never get here in
+ * the manual case.
+ */
+ if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = DPLL(pipe);
+ int dpll;
+
+ DRM_DEBUG_DRIVER("downclocking LVDS\n");
+
+ assert_panel_unlocked(dev_priv, pipe);
+
+ dpll = I915_READ(dpll_reg);
+ dpll |= DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ intel_wait_for_vblank(dev, pipe);
+ dpll = I915_READ(dpll_reg);
+ if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
+ DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
+ }
+}
+
+void intel_mark_busy(struct drm_device *dev)
+{
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ if (!i915_powersave)
+ return;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (!crtc->fb)
+ continue;
+
+ intel_decrease_pllclock(crtc);
+ }
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_crtc *crtc;
+
+ if (!i915_powersave)
+ return;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (!crtc->fb)
+ continue;
+
+ if (to_intel_framebuffer(crtc->fb)->obj != obj)
+ continue;
+
+ intel_increase_pllclock(crtc);
+ if (ring && intel_fbc_enabled(dev))
+ ring->fbc_dirty = true;
+ }
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_unpin_work *work;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (work) {
+ cancel_delayed_work(dev_priv->other_wq);
+ kfree(work, sizeof(*work));
+ }
+
+ intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
+
+ drm_crtc_cleanup(crtc);
+
+ kfree(intel_crtc, sizeof (struct intel_crtc) +
+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)));
+}
+
+static void intel_unpin_work_fn(struct work_struct *__work)
+{
+ struct intel_unpin_work *work =
+ container_of(__work, struct intel_unpin_work, work);
+ struct drm_device *dev = work->crtc->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(work->old_fb_obj);
+ drm_gem_object_unreference(&work->pending_flip_obj->base);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+
+ intel_update_fbc(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+ atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+ kfree(work, sizeof(struct intel_unpin_work));
+}
+
+static void do_intel_finish_page_flip(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ unsigned long flags;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
+ intel_crtc->unpin_work = NULL;
+
+ if (work->event) {
+ drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
+
+ pollwakeup(&work->event->base.file_priv->drm_pollhead, POLLIN | POLLRDNORM);
+ }
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ DRM_WAKEUP(&dev_priv->pending_flip_queue);
+
+ (void) queue_work(dev_priv->wq, &work->work);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
+
+ do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ unsigned long flags;
+
+ /* NB: An MMIO update of the plane base pointer will also
+ * generate a page-flip completion irq, i.e. every modeset
+ * is also accompanied by a spurious intel_prepare_page_flip().
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (intel_crtc->unpin_work)
+ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+ /* Ensure that the work item is consistent when activating it ... */
+ membar_producer();
+ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+ /* and that it is marked active as soon as the irq could fire. */
+ membar_producer();
+}
+
+static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ goto err_unpin;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ goto err_unpin;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err_unpin;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring,
+ (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ obj->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err_unpin;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+
+ /* Contrary to the suggestions in the documentation,
+ * "Enable Panel Fitter" does not seem to be required when page
+ * flipping with a non-native mode, and worse causes a normal
+ * modeset to fail.
+ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ intel_ring_emit(ring, pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ uint32_t plane_bit = 0;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto err;
+
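+	/* gen7 selects the target plane with dedicated MI_DISPLAY_FLIP bits. */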
+	switch (intel_crtc->plane) {
+ case PLANE_A:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+ break;
+ case PLANE_B:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+ break;
+ case PLANE_C:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+ break;
+ default:
+ DRM_ERROR("unknown plane in flip command\n");
+ ret = -ENODEV;
+ goto err_unpin;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err_unpin;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+	intel_ring_emit(ring, MI_NOOP);
+
+ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
+ return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ return -ENODEV;
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *old_fb = crtc->fb;
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ int ret;
+
+ /* Can't change pixel format via MI display flips. */
+ if (fb->pixel_format != crtc->fb->pixel_format)
+ return -EINVAL;
+
+ /*
+ * TILEOFF/LINOFF registers can't be changed via MI display flips.
+	 * Note that pitch changes could also affect these registers.
+ */
+ if (INTEL_INFO(dev)->gen > 3 &&
+ (fb->offsets[0] != crtc->fb->offsets[0] ||
+ fb->pitches[0] != crtc->fb->pitches[0]))
+ return -EINVAL;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ work->event = event;
+ work->crtc = crtc;
+ work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ INIT_WORK(&work->work, intel_unpin_work_fn);
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (intel_crtc->unpin_work) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(work, sizeof(struct intel_unpin_work));
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ return -EBUSY;
+ }
+ intel_crtc->unpin_work = work;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
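+	/* Throttle: with two flips already outstanding, drain the workqueue
+	 * so their unpin work completes before queueing another. */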
+ if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+ flush_workqueue(dev_priv->wq);
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Reference the objects for the scheduled work. */
+ drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_gem_object_reference(&obj->base);
+
+ crtc->fb = fb;
+
+ work->pending_flip_obj = obj;
+ work->enable_stall_check = true;
+
+ atomic_inc(&intel_crtc->unpin_work_count);
+ intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+ goto cleanup_pending;
+
+ intel_disable_fbc(dev);
+ intel_mark_fb_busy(obj, NULL);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+cleanup_pending:
+ atomic_dec(&intel_crtc->unpin_work_count);
+ crtc->fb = old_fb;
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+cleanup:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
+ kfree(work, sizeof(struct intel_unpin_work));
+
+ return ret;
+}
+
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+};
+
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ ASSERT(crtc);
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->new_encoder =
+ to_intel_encoder(connector->base.encoder);
+ }
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ }
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ connector->base.encoder = &connector->new_encoder->base;
+ }
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ encoder->base.crtc = &encoder->new_crtc->base;
+ }
+}
+
+static void
+connected_sink_compute_bpp(struct intel_connector * connector,
+ struct intel_crtc_config *pipe_config)
+{
+ int bpp = pipe_config->pipe_bpp;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+
+ /* Don't use an invalid EDID bpc value */
+ if (connector->base.display_info.bpc &&
+ connector->base.display_info.bpc * 3 < bpp) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
+ bpp, connector->base.display_info.bpc*3);
+ pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ }
+
+ /* Clamp bpp to 8 on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0 && bpp > 24) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+ bpp);
+ pipe_config->pipe_bpp = 24;
+ }
+}
+
+static int
+compute_baseline_pipe_bpp(struct intel_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_connector *connector;
+ int bpp;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ bpp = 8*3; /* since we go through a colormap */
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ /* checked in intel_framebuffer_init already */
+ if (INTEL_INFO(dev)->gen > 3)
+			return -EINVAL;
+		/* FALLTHROUGH */
+	case DRM_FORMAT_RGB565:
+ bpp = 6*3; /* min is 18bpp */
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ /* checked in intel_framebuffer_init already */
+ if (INTEL_INFO(dev)->gen < 4)
+			return -EINVAL;
+		/* FALLTHROUGH */
+	case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ bpp = 8*3;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ /* checked in intel_framebuffer_init already */
+ if (INTEL_INFO(dev)->gen < 4)
+ return -EINVAL;
+ bpp = 10*3;
+ break;
+ /* TODO: gen4+ supports 16 bpc floating point, too. */
+ default:
+ DRM_DEBUG_KMS("unsupported depth\n");
+ return -EINVAL;
+ }
+
+ pipe_config->pipe_bpp = bpp;
+
+ /* Clamp display bpp to EDID value */
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (!connector->new_encoder ||
+ connector->new_encoder->new_crtc != crtc)
+ continue;
+
+ connected_sink_compute_bpp(connector, pipe_config);
+ }
+
+ return bpp;
+}
+
+static void intel_dump_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ const char *context)
+{
+ DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
+ context, pipe_name(crtc->pipe));
+
+ DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
+ DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+ pipe_config->pipe_bpp, pipe_config->dither);
+ DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ pipe_config->has_pch_encoder,
+ pipe_config->fdi_lanes,
+ pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
+ pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
+ pipe_config->fdi_m_n.tu);
+ DRM_DEBUG_KMS("requested mode:\n");
+ drm_mode_debug_printmodeline(&pipe_config->requested_mode);
+ DRM_DEBUG_KMS("adjusted mode:\n");
+ drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
+ DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size);
+ DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+}
+
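+/*
+ * Cloning (driving several encoders from one crtc) is only valid when
+ * every encoder staged on the crtc declares itself cloneable.
+ */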
+static bool check_encoder_cloning(struct drm_crtc *crtc)
+{
+ int num_encoders = 0;
+ bool uncloneable_encoders = false;
+ struct intel_encoder *encoder;
+
+ list_for_each_entry(encoder, struct intel_encoder, &crtc->dev->mode_config.encoder_list,
+ base.head) {
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+
+ num_encoders++;
+ if (!encoder->cloneable)
+ uncloneable_encoders = true;
+ }
+
+ return !(num_encoders > 1 && uncloneable_encoders);
+}
+
+static struct intel_crtc_config *
+intel_modeset_pipe_config(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct intel_encoder *encoder;
+ struct intel_crtc_config *pipe_config;
+ int plane_bpp, ret = -EINVAL;
+ bool retry = true;
+
+ if (!check_encoder_cloning(crtc)) {
+ DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ return NULL;
+ }
+
+ pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ if (!pipe_config)
+ return NULL;
+
+ drm_mode_copy(&pipe_config->adjusted_mode, mode);
+ drm_mode_copy(&pipe_config->requested_mode, mode);
+ pipe_config->cpu_transcoder = (enum transcoder)to_intel_crtc(crtc)->pipe;
+ pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+
+ /* Compute a starting value for pipe_config->pipe_bpp taking the source
+ * plane pixel format and any sink constraints into account. Returns the
+ * source plane bpp so that dithering can be selected on mismatches
+ * after encoders and crtc also have had their say. */
+ plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+ fb, pipe_config);
+ if (plane_bpp < 0)
+ goto fail;
+
+encoder_retry:
+ /* Ensure the port clock defaults are reset when retrying. */
+ pipe_config->port_clock = 0;
+ pipe_config->pixel_multiplier = 1;
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+
+ if (&encoder->new_crtc->base != crtc)
+ continue;
+
+ if (encoder->compute_config) {
+ if (!(encoder->compute_config(encoder, pipe_config))) {
+ DRM_DEBUG_KMS("Encoder config failure\n");
+ goto fail;
+ }
+
+ continue;
+ }
+
+ encoder_funcs = encoder->base.helper_private;
+ if (!(encoder_funcs->mode_fixup(&encoder->base,
+ &pipe_config->requested_mode,
+ &pipe_config->adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto fail;
+ }
+ }
+
+ /* Set default port clock if not overwritten by the encoder. Needs to be
+ * done afterwards in case the encoder adjusts the mode. */
+ if (!pipe_config->port_clock)
+ pipe_config->port_clock = pipe_config->adjusted_mode.clock;
+
+ ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto fail;
+ }
+
+ if (ret == RETRY) {
+ if (!retry) {
+ DRM_ERROR("loop in pipe configuration computation");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+ retry = false;
+ goto encoder_retry;
+ }
+
+ pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
+ DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
+ plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+
+ return pipe_config;
+fail:
+ kfree(pipe_config, sizeof(*pipe_config));
+ return NULL;
+}
+
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+ unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_crtc *tmp_crtc;
+
+ *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+ /* Check which crtcs have changed outputs connected to them, these need
+ * to be part of the prepare_pipes mask. We don't (yet) support global
+ * modeset across multiple crtcs, so modeset_pipes will only have one
+ * bit set at most. */
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder == &connector->new_encoder->base)
+ continue;
+
+ if (connector->base.encoder) {
+ tmp_crtc = connector->base.encoder->crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (connector->new_encoder)
+ *prepare_pipes |=
+ 1 << connector->new_encoder->new_crtc->pipe;
+ }
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc == &encoder->new_crtc->base)
+ continue;
+
+ if (encoder->base.crtc) {
+ tmp_crtc = encoder->base.crtc;
+
+ *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+ }
+
+ if (encoder->new_crtc)
+ *prepare_pipes |= 1 << encoder->new_crtc->pipe;
+ }
+
+ /* Check for any pipes that will be fully disabled ... */
+ list_for_each_entry(intel_crtc, struct intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool used = false;
+
+ /* Don't try to disable disabled crtcs. */
+ if (!intel_crtc->base.enabled)
+ continue;
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->new_crtc == intel_crtc)
+ used = true;
+ }
+
+ if (!used)
+ *disable_pipes |= 1 << intel_crtc->pipe;
+ }
+
+	/* set_mode is also used to update properties on live display pipes. */
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled)
+ *prepare_pipes |= 1 << intel_crtc->pipe;
+
+ /*
+ * For simplicity do a full modeset on any pipe where the output routing
+ * changed. We could be more clever, but that would require us to be
+ * more careful with calling the relevant encoder->mode_set functions.
+ */
+ if (*prepare_pipes)
+ *modeset_pipes = *prepare_pipes;
+
+ /* ... and mask these out. */
+ *modeset_pipes &= ~(*disable_pipes);
+ *prepare_pipes &= ~(*disable_pipes);
+
+ /*
+ * HACK: We don't (yet) fully support global modesets. intel_set_config
+	 * obeys this rule, but the modeset restore mode of
+ * intel_modeset_setup_hw_state does not.
+ */
+ *modeset_pipes &= 1 << intel_crtc->pipe;
+ *prepare_pipes &= 1 << intel_crtc->pipe;
+
+ DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+ *modeset_pipes, *prepare_pipes, *disable_pipes);
+}
+
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ return true;
+
+ return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_crtc *intel_crtc;
+ struct drm_connector *connector;
+
+ list_for_each_entry(intel_encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (!intel_encoder->base.crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe))
+ intel_encoder->connectors_active = false;
+ }
+
+ intel_modeset_commit_output_state(dev);
+
+ /* Update computed state. */
+ list_for_each_entry(intel_crtc, struct intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+ }
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder || !connector->encoder->crtc)
+ continue;
+
+ intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+ if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_object_property_set_value(&connector->base,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
+
+ intel_encoder = to_intel_encoder(connector->encoder);
+ intel_encoder->connectors_active = true;
+ }
+ }
+}
+
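+/* Iterate over the intel crtcs whose pipe bit is set in the given mask. */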
+#define for_each_intel_crtc_masked(dev, mask, _intel_crtc) \
+ list_for_each_entry((_intel_crtc), struct intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+		if (mask & (1 << (_intel_crtc)->pipe)) \
+
+static bool
+intel_pipe_config_compare(struct drm_device *dev,
+ struct intel_crtc_config *current_config,
+ struct intel_crtc_config *pipe_config)
+{
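+/* Compare one field of the sw-computed pipe config against the state read
+ * back from the hardware; log the mismatch and fail the check. */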
+#define PIPE_CONF_CHECK_X(name) \
+ if (current_config->name != pipe_config->name) { \
+ DRM_DEBUG_KMS("mismatch in " #name " " \
+ "(expected 0x%08x, found 0x%08x)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ return false; \
+ }
+
+#define PIPE_CONF_CHECK_I(name) \
+ if (current_config->name != pipe_config->name) { \
+ DRM_DEBUG_KMS("mismatch in " #name " " \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ return false; \
+ }
+
+#define PIPE_CONF_CHECK_FLAGS(name, mask) \
+ if ((current_config->name ^ pipe_config->name) & (mask)) { \
+ DRM_DEBUG_KMS("mismatch in " #name " " \
+ "(expected %i, found %i)\n", \
+ current_config->name & (mask), \
+ pipe_config->name & (mask)); \
+ return false; \
+ }
+
+#define PIPE_CONF_QUIRK(quirk) \
+ ((current_config->quirks | pipe_config->quirks) & (quirk))
+
+ PIPE_CONF_CHECK_I(cpu_transcoder);
+
+ PIPE_CONF_CHECK_I(has_pch_encoder);
+ PIPE_CONF_CHECK_I(fdi_lanes);
+ PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
+ PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
+ PIPE_CONF_CHECK_I(fdi_m_n.link_m);
+ PIPE_CONF_CHECK_I(fdi_m_n.link_n);
+ PIPE_CONF_CHECK_I(fdi_m_n.tu);
+
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
+
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
+
+ if (!HAS_PCH_SPLIT(dev))
+ PIPE_CONF_CHECK_I(pixel_multiplier);
+
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_INTERLACE);
+
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_PHSYNC);
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_NHSYNC);
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_PVSYNC);
+ PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+ DRM_MODE_FLAG_NVSYNC);
+ }
+
+ PIPE_CONF_CHECK_I(requested_mode.hdisplay);
+ PIPE_CONF_CHECK_I(requested_mode.vdisplay);
+
+ PIPE_CONF_CHECK_I(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (INTEL_INFO(dev)->gen < 4)
+ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ PIPE_CONF_CHECK_I(pch_pfit.pos);
+ PIPE_CONF_CHECK_I(pch_pfit.size);
+
+ PIPE_CONF_CHECK_I(ips_enabled);
+
+ PIPE_CONF_CHECK_I(shared_dpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+
+#undef PIPE_CONF_CHECK_X
+#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_QUIRK
+
+ return true;
+}
+
+static void
+check_connector_state(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* This also checks the encoder/connector hw state with the
+ * ->get_hw_state callbacks. */
+ intel_connector_check_state(connector);
+
+ if (&connector->new_encoder->base != connector->base.encoder)
+ DRM_ERROR("connector's staged encoder doesn't match current encoder\n");
+ }
+}
+
+static void
+check_encoder_state(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+ enum pipe pipe, tracked_pipe;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ if (&encoder->new_crtc->base != encoder->base.crtc)
+ DRM_ERROR("encoder's stage crtc doesn't match current crtc\n");
+ if (encoder->connectors_active && !encoder->base.crtc)
+ DRM_ERROR("encoder's active_connectors set, but no crtc\n");
+
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->base.encoder != &encoder->base)
+ continue;
+ enabled = true;
+ if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+ active = true;
+ }
+ if (!!encoder->base.crtc != enabled)
+ DRM_ERROR("encoder's enabled state mismatch "
+ "(expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+ if (active && !encoder->base.crtc)
+ DRM_ERROR("active encoder with no crtc\n");
+
+ if (encoder->connectors_active != active)
+ DRM_ERROR("encoder's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ if (active != encoder->connectors_active)
+ DRM_ERROR("encoder's hw state doesn't match sw tracking "
+ "(expected %i, found %i)\n",
+ encoder->connectors_active, active);
+
+ if (!encoder->base.crtc)
+ continue;
+
+ tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ if (active && pipe != tracked_pipe)
+ DRM_ERROR("active encoder's pipe doesn't match"
+ "(expected %i, found %i)\n",
+ tracked_pipe, pipe);
+ }
+}
+
+static void
+check_crtc_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_crtc_config pipe_config;
+
+ list_for_each_entry(crtc, struct intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ bool enabled = false;
+ bool active = false;
+
+ (void) memset(&pipe_config, 0, sizeof(pipe_config));
+
+ DRM_DEBUG_KMS("[CRTC:%d]\n",
+ crtc->base.base.id);
+
+ if (crtc->active && !crtc->base.enabled)
+ DRM_ERROR("active crtc, but not enabled in sw tracking\n");
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ enabled = true;
+ if (encoder->connectors_active)
+ active = true;
+ }
+
+ if (active != crtc->active)
+ DRM_ERROR("crtc's computed active state doesn't match tracked active state "
+ "(expected %i, found %i)\n", active, crtc->active);
+ if (enabled != crtc->base.enabled)
+ DRM_ERROR("crtc's computed enabled state doesn't match tracked enabled state "
+ "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+ active = dev_priv->display.get_pipe_config(crtc,
+ &pipe_config);
+
+ /* hw state is inconsistent with the pipe A quirk */
+ if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ active = crtc->active;
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ enum pipe pipe;
+ if (encoder->base.crtc != &crtc->base)
+ continue;
+ if (encoder->get_config &&
+ encoder->get_hw_state(encoder, &pipe))
+ encoder->get_config(encoder, &pipe_config);
+ }
+
+ if (crtc->active != active)
+ DRM_ERROR("crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n", crtc->active, active);
+
+ if (active &&
+ !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
+ DRM_DEBUG_KMS("pipe state doesn't match!\n");
+ intel_dump_pipe_config(crtc, &pipe_config,
+ "[hw state]");
+ intel_dump_pipe_config(crtc, &crtc->config,
+ "[sw state]");
+ }
+ }
+}
+
+static void
+check_shared_dpll_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc;
+ struct intel_dpll_hw_state dpll_hw_state;
+ int i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ int enabled_crtcs = 0, active_crtcs = 0;
+ bool active;
+
+ memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+ DRM_DEBUG_KMS("%s\n", pll->name);
+
+ active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
+
+ if (pll->active > pll->refcount)
+ DRM_DEBUG_KMS("more active pll users than references: %i vs %i\n",
+ pll->active, pll->refcount);
+ if (pll->active && !pll->on)
+ DRM_DEBUG_KMS("pll in active use but not on in sw tracking\n");
+ if (pll->on && !pll->active)
+ DRM_DEBUG_KMS("pll is on but not in use in sw tracking\n");
+ if (pll->on != active)
+ DRM_DEBUG_KMS("pll on state mismatch (expected %i, found %i)\n",
+ pll->on, active);
+
+ list_for_each_entry(crtc, struct intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
+ enabled_crtcs++;
+ if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+ active_crtcs++;
+ }
+ if (pll->active != active_crtcs)
+ DRM_DEBUG_KMS("pll active crtcs mismatch (expected %i, found %i)\n",
+ pll->active, active_crtcs);
+ if (pll->refcount != enabled_crtcs)
+ DRM_DEBUG_KMS("pll enabled crtcs mismatch (expected %i, found %i)\n",
+ pll->refcount, enabled_crtcs);
+
+ if (pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
+ sizeof(dpll_hw_state)))
+ DRM_ERROR("pll hw state mismatch\n");
+ }
+}
+
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+ check_connector_state(dev);
+ check_encoder_state(dev);
+ check_crtc_state(dev);
+ check_shared_dpll_state(dev);
+}
+
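+/*
+ * Core modeset routine: compute the affected pipe masks, disable the
+ * pipes that are going away, program the new mode and re-enable
+ * everything. On failure the saved modes are restored on the crtc
+ * before returning.
+ */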
+static int __intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_display_mode *saved_mode, *saved_hwmode;
+ struct intel_crtc_config *pipe_config = NULL;
+ struct intel_crtc *intel_crtc;
+ unsigned disable_pipes, prepare_pipes, modeset_pipes;
+ int ret = 0;
+
+ saved_mode = kzalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+ if (!saved_mode)
+ return -ENOMEM;
+ saved_hwmode = saved_mode + 1;
+
+ intel_modeset_affected_pipes(crtc, &modeset_pipes,
+ &prepare_pipes, &disable_pipes);
+
+ *saved_hwmode = crtc->hwmode;
+ *saved_mode = crtc->mode;
+
+ /* Hack: Because we don't (yet) support global modeset on multiple
+ * crtcs, we don't keep track of the new mode for more than one crtc.
+ * Hence simply check whether any bit is set in modeset_pipes in all the
+ * pieces of code that are not yet converted to deal with multiple crtcs
+ * changing their mode at the same time. */
+ if (modeset_pipes) {
+ pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+ if (!pipe_config) {
+ ret = -EINVAL;
+ pipe_config = NULL;
+
+ goto out;
+ }
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+ "[modeset]");
+ }
+
+ for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+ intel_crtc_disable(&intel_crtc->base);
+
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+ if (intel_crtc->base.enabled)
+ dev_priv->display.crtc_disable(&intel_crtc->base);
+ }
+
+ /* crtc->mode is already used by the ->mode_set callbacks, hence we need
+ * to set it here already even though we also pass it down the callchain.
+ */
+ if (modeset_pipes) {
+ crtc->mode = *mode;
+ /* mode_set/enable/disable functions rely on a correct pipe
+ * config. */
+ to_intel_crtc(crtc)->config = *pipe_config;
+ }
+
+ /* Only after disabling all output pipelines that will be changed can we
+ * update the output configuration. */
+ intel_modeset_update_state(dev, prepare_pipes);
+
+ if (dev_priv->display.modeset_global_resources)
+ dev_priv->display.modeset_global_resources(dev);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
+ */
+ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+ ret = intel_crtc_mode_set(&intel_crtc->base,
+ x, y, fb);
+ if (ret)
+ goto done;
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+ dev_priv->display.crtc_enable(&intel_crtc->base);
+
+ if (modeset_pipes) {
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = pipe_config->adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+ }
+
+ /* FIXME: add subpixel order */
+done:
+ if (ret && crtc->enabled) {
+ crtc->hwmode = *saved_hwmode;
+ crtc->mode = *saved_mode;
+ }
+
+out:
+ if (pipe_config)
+ kfree(pipe_config, sizeof(*pipe_config));
+ kfree(saved_mode, 2 * sizeof(*saved_mode));
+ return ret;
+}
+
+int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ int ret;
+
+ ret = __intel_set_mode(crtc, mode, x, y, fb);
+
+ if (ret == 0)
+ intel_modeset_check_state(crtc->dev);
+
+ return ret;
+}
+
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+ intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
+}
+
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct drm_device *dev, struct intel_set_config *config)
+{
+ if (!config)
+ return;
+
+ kfree(config->save_connector_encoders, dev->mode_config.num_connector * sizeof(struct drm_encoder *));
+ kfree(config->save_encoder_crtcs, dev->mode_config.num_encoder * sizeof(struct drm_crtc *));
+ kfree(config, sizeof(*config));
+}
+
+static int intel_set_config_save_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int count;
+
+ config->save_encoder_crtcs =
+ kcalloc(dev->mode_config.num_encoder,
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!config->save_encoder_crtcs)
+ return -ENOMEM;
+
+ config->save_connector_encoders =
+ kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!config->save_connector_encoders)
+ return -ENOMEM;
+
+ /* Copy data. Note that driver private data is not affected.
+ * Should anything bad happen only the expected state is
+ * restored, not the drivers personal bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ config->save_encoder_crtcs[count++] = encoder->crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ config->save_connector_encoders[count++] = connector->encoder;
+ }
+
+ return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+ struct intel_set_config *config)
+{
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int count;
+
+ count = 0;
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->new_crtc =
+ to_intel_crtc(config->save_encoder_crtcs[count++]);
+ }
+
+ count = 0;
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list, base.head) {
+ connector->new_encoder =
+ to_intel_encoder(config->save_connector_encoders[count++]);
+ }
+}
+
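+/*
+ * Returns true when the request leaves at least one connector on this
+ * crtc in a non-DPMS-ON state, which forces a full modeset.
+ */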
+static bool
+is_crtc_connector_off(struct drm_mode_set *set)
+{
+ int i;
+
+ if (set->num_connectors == 0)
+ return false;
+
+ if (set->connectors == NULL)
+ return false;
+
+ for (i = 0; i < set->num_connectors; i++)
+ if (set->connectors[i]->encoder &&
+ set->connectors[i]->encoder->crtc == set->crtc &&
+ set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
+ return true;
+
+ return false;
+}
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (is_crtc_connector_off(set)) {
+ config->mode_changed = true;
+ } else if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ config->mode_changed = true;
+ } else if (set->fb == NULL) {
+ config->mode_changed = true;
+ } else if (set->fb->pixel_format !=
+ set->crtc->fb->pixel_format) {
+ config->mode_changed = true;
+ } else {
+ config->fb_changed = true;
+ }
+ }
+
+ if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+ config->fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ config->mode_changed = true;
+ }
+}
+
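+/*
+ * Stage the new output routing: update connector->new_encoder and
+ * encoder->new_crtc without touching the hardware, and flag a full
+ * mode_changed whenever a link differs from the current state.
+ */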
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+ struct drm_mode_set *set,
+ struct intel_set_config *config)
+{
+ struct drm_crtc *new_crtc;
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ /* LINTED */
+ int count, ro;
+
+ /* The upper layers ensure that we either disable a crtc or have a list
+ * of connectors. For paranoia, double-check this. */
+ WARN_ON(!set->fb && (set->num_connectors != 0));
+ WARN_ON(set->fb && (set->num_connectors == 0));
+
+ count = 0;
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ /* Traverse the passed-in connector list and get encoders
+ * for them. */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base) {
+ connector->new_encoder = connector->encoder;
+ break;
+ }
+ }
+
+ /* If we disable the crtc, disable all its connectors. Also, if
+ * the connector is on the changing crtc but not on the new
+ * connector list, disable it. */
+ if ((!set->fb || ro == set->num_connectors) &&
+ connector->base.encoder &&
+ connector->base.encoder->crtc == set->crtc) {
+ connector->new_encoder = NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base));
+ }
+
+ if (&connector->new_encoder->base != connector->base.encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* connector->new_encoder is now updated for all connectors. */
+
+ /* Update crtc of enabled connectors. */
+ count = 0;
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (!connector->new_encoder)
+ continue;
+
+ new_crtc = connector->new_encoder->base.crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == &connector->base)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+ new_crtc)) {
+ return -EINVAL;
+ }
+ connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ new_crtc->base.id);
+ }
+
+ /* Check for any encoders that need to be disabled. */
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ list_for_each_entry(connector, struct intel_connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder == encoder) {
+ WARN_ON(!connector->new_encoder->new_crtc);
+
+ goto next_encoder;
+ }
+ }
+ encoder->new_crtc = NULL;
+next_encoder:
+ /* Only now check for crtc changes so we don't miss encoders
+ * that will be disabled. */
+ if (&encoder->new_crtc->base != encoder->base.crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ config->mode_changed = true;
+ }
+ }
+ /* Now we've also updated encoder->new_crtc for all encoders. */
+
+ return 0;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_mode_set save_set;
+ struct intel_set_config *config;
+ int ret;
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ }
+
+ dev = set->crtc->dev;
+
+ ret = -ENOMEM;
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ goto out_config;
+
+ ret = intel_set_config_save_state(dev, config);
+ if (ret)
+ goto out_config;
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* Compute whether we need a full modeset, only an fb base update or no
+ * change at all. In the future we might also check whether only the
+ * mode changed, e.g. for LVDS where we only change the panel fitter in
+ * such cases. */
+ intel_set_config_compute_mode_changes(set, config);
+
+ ret = intel_modeset_stage_output_state(dev, set, config);
+ if (ret)
+ goto fail;
+
+ if (config->mode_changed) {
+ if ((set->mode == NULL) && set->fb) {
+ DRM_DEBUG_KMS("fb changed without set->mode");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = intel_set_mode(set->crtc, set->mode,
+ set->x, set->y, set->fb);
+ } else if (config->fb_changed) {
+ intel_crtc_wait_for_pending_flips(set->crtc);
+
+ ret = intel_pipe_set_base(set->crtc,
+ set->x, set->y, set->fb);
+ }
+
+ if (ret) {
+ DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
+ set->crtc->base.id, ret);
+fail:
+ intel_set_config_restore_state(dev, config);
+
+ /* Try to restore the config */
+ if (config->mode_changed &&
+ intel_set_mode(save_set.crtc, save_set.mode,
+ save_set.x, save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+ }
+
+out_config:
+ intel_set_config_free(dev, config);
+ return ret;
+}
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .cursor_set = intel_crtc_cursor_set,
+ .cursor_move = intel_crtc_cursor_move,
+ .gamma_set = intel_crtc_gamma_set,
+ .set_config = intel_crtc_set_config,
+ .destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
+};
+
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+}
+
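+/*
+ * Read back the PCH DPLL registers into hw_state; the PLL counts as
+ * active when its VCO enable bit is set.
+ */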
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ val = I915_READ(PCH_DPLL(pll->id));
+ hw_state->dpll = val;
+ hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
+ hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+
+ return val & DPLL_VCO_ENABLE;
+}
+
+static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t reg, val;
+
+ /* PCH refclock must be enabled first */
+ assert_pch_refclk_enabled(dev_priv);
+
+ reg = PCH_DPLL(pll->id);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(200);
+}
+
+static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+ uint32_t reg, val;
+
+ /* Make sure no transcoder is still depending on us. */
+ list_for_each_entry(crtc, struct intel_crtc, &dev->mode_config.crtc_list, base.head) {
+ if (intel_crtc_to_shared_dpll(crtc) == pll)
+ assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
+ }
+
+ reg = PCH_DPLL(pll->id);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(200);
+}
+
+static char *ibx_pch_dpll_names[] = {
+ "PCH DPLL A",
+ "PCH DPLL B",
+};
+
+static void ibx_pch_dpll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->num_shared_dpll = 2;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+ dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
+ dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ ibx_pch_dpll_get_hw_state;
+ }
+}
+
+static void intel_shared_dpll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ ibx_pch_dpll_init(dev);
+ else
+ dev_priv->num_shared_dpll = 0;
+
+ BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+ DRM_DEBUG_KMS("%i shared PLLs initialized\n",
+ dev_priv->num_shared_dpll);
+}
+
+static void intel_crtc_init(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc;
+ int i;
+
+ intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (intel_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+
+ (void) drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+ for (i = 0; i < 256; i++) {
+ intel_crtc->lut_r[i] = (u8) i;
+ intel_crtc->lut_g[i] = (u8) i;
+ intel_crtc->lut_b[i] = (u8) i;
+ }
+
+ /* Swap pipes & planes for FBC on pre-965 */
+ intel_crtc->pipe = pipe;
+ intel_crtc->plane = pipe;
+ if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+ DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
+ intel_crtc->plane = !pipe;
+ }
+
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
+ drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+}
+
+/* LINTED */
+int intel_get_pipe_from_crtc_id(DRM_IOCTL_ARGS)
+{
+ struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+
+ if (!drmmode_obj) {
+ DRM_ERROR("no such CRTC id\n");
+ return -EINVAL;
+ }
+
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ pipe_from_crtc_id->pipe = crtc->pipe;
+
+ return 0;
+}
+
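+/*
+ * Build the possible_clones bitmask for an encoder: itself plus every
+ * other encoder that can share the single cloning MUX with it.
+ */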
+static int intel_encoder_clones(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_encoder *source_encoder;
+ int index_mask = 0;
+ int entry = 0;
+
+ list_for_each_entry(source_encoder, struct intel_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+ if (encoder == source_encoder)
+ index_mask |= (1 << entry);
+
+ /* Intel hw has only one MUX where encoders could be cloned. */
+ if (encoder->cloneable && source_encoder->cloneable)
+ index_mask |= (1 << entry);
+
+ entry++;
+ }
+
+ return index_mask;
+}
+
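+/*
+ * eDP on port A is only present on mobile parts, requires DP_A to be
+ * detected and, on gen5, must not be fused off.
+ */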
+static bool has_edp_a(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_MOBILE(dev))
+ return false;
+
+ if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+ return false;
+
+ if (IS_GEN5(dev) &&
+ (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
+ return false;
+
+ return true;
+}
+
+static void intel_setup_outputs(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *encoder;
+ bool dpd_is_edp = false;
+
+ intel_lvds_init(dev);
+
+ if (!IS_ULT(dev))
+ intel_crt_init(dev);
+
+ if (HAS_DDI(dev)) {
+ int found;
+
+ /* Haswell uses DDI functions to detect digital outputs */
+ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+ /* DDI A only supports eDP */
+ if (found)
+ intel_ddi_init(dev, PORT_A);
+
+ /* DDI B, C and D detection is indicated by the SFUSE_STRAP
+ * register */
+ found = I915_READ(SFUSE_STRAP);
+
+ if (found & SFUSE_STRAP_DDIB_DETECTED)
+ intel_ddi_init(dev, PORT_B);
+ if (found & SFUSE_STRAP_DDIC_DETECTED)
+ intel_ddi_init(dev, PORT_C);
+ if (found & SFUSE_STRAP_DDID_DETECTED)
+ intel_ddi_init(dev, PORT_D);
+ } else if (HAS_PCH_SPLIT(dev)) {
+ int found;
+ dpd_is_edp = intel_dpd_is_edp(dev);
+
+ if (has_edp_a(dev))
+ intel_dp_init(dev, DP_A, PORT_A);
+
+ if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+ /* PCH SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB, true);
+ if (!found)
+ intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
+ if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+ intel_dp_init(dev, PCH_DP_B, PORT_B);
+ }
+
+ if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+ intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
+
+ if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+ intel_hdmi_init(dev, PCH_HDMID, PORT_D);
+
+ if (I915_READ(PCH_DP_C) & DP_DETECTED)
+ intel_dp_init(dev, PCH_DP_C, PORT_C);
+
+ if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ intel_dp_init(dev, PCH_DP_D, PORT_D);
+ } else if (IS_VALLEYVIEW(dev)) {
+ /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+ if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
+
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+ PORT_B);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
+ }
+ } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+ bool found = false;
+
+ if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOB\n");
+ found = intel_sdvo_init(dev, GEN3_SDVOB, true);
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+ intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
+ }
+
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_B, PORT_B);
+ }
+
+ /* Before G4X, SDVOC doesn't have its own detect register */
+
+ if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOC\n");
+ found = intel_sdvo_init(dev, GEN3_SDVOC, false);
+ }
+
+ if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
+ if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+ intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
+ }
+ if (SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_C, PORT_C);
+ }
+
+ if (SUPPORTS_INTEGRATED_DP(dev) &&
+ (I915_READ(DP_D) & DP_DETECTED))
+ intel_dp_init(dev, DP_D, PORT_D);
+ } else if (IS_GEN2(dev))
+ intel_dvo_init(dev);
+
+ if (SUPPORTS_TV(dev))
+ intel_tv_init(dev);
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list, base.head) {
+ encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_clones =
+ intel_encoder_clones(encoder);
+ }
+
+ intel_init_pch_refclk(dev);
+
+ drm_helper_move_panel_connectors_to_head(dev);
+}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ drm_framebuffer_cleanup(fb);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+
+ kfree(intel_fb, sizeof (struct intel_framebuffer));
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+
+ return drm_gem_handle_create(file, &obj->base, handle);
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+ .destroy = intel_user_framebuffer_destroy,
+ .create_handle = intel_user_framebuffer_create_handle,
+};
+
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj)
+{
+ int pitch_limit;
+ int ret;
+
+ if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("hardware does not support tiling Y\n");
+ return -EINVAL;
+ }
+
+ if (mode_cmd->pitches[0] & 63) {
+ DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+ mode_cmd->pitches[0]);
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
+ pitch_limit = 32*1024;
+ } else if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode)
+ pitch_limit = 16*1024;
+ else
+ pitch_limit = 32*1024;
+ } else if (INTEL_INFO(dev)->gen >= 3) {
+ if (obj->tiling_mode)
+ pitch_limit = 8*1024;
+ else
+ pitch_limit = 16*1024;
+ } else
+ /* XXX DSPC is limited to 4k tiled */
+ pitch_limit = 8*1024;
+
+ if (mode_cmd->pitches[0] > pitch_limit) {
+ DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
+ obj->tiling_mode ? "tiled" : "linear",
+ mode_cmd->pitches[0], pitch_limit);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE &&
+ mode_cmd->pitches[0] != obj->stride) {
+ DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], obj->stride);
+ return -EINVAL;
+ }
+
+ /* Reject formats not supported by any plane early. */
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ if (INTEL_INFO(dev)->gen > 3) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
+ if (INTEL_INFO(dev)->gen < 5) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0)
+ return -EINVAL;
+
+ (void) drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ intel_fb->obj = obj;
+
+ ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
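+/*
+ * .fb_create hook: look up the GEM object backing the request and wrap
+ * it in an intel_framebuffer.
+ */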
+static struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+ mode_cmd->handles[0]));
+ if (&obj->base == NULL)
+ return (NULL);
+
+ return intel_framebuffer_create(dev, mode_cmd, obj);
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+ .fb_create = intel_user_framebuffer_create,
+ .output_poll_changed = intel_fb_output_poll_changed,
+};
+
+/* Set up chip specific display functions */
+static void intel_init_display(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
+ dev_priv->display.find_dpll = g4x_find_best_dpll;
+ else if (IS_VALLEYVIEW(dev))
+ dev_priv->display.find_dpll = vlv_find_best_dpll;
+ else if (IS_PINEVIEW(dev))
+ dev_priv->display.find_dpll = pnv_find_best_dpll;
+ else
+ dev_priv->display.find_dpll = i9xx_find_best_dpll;
+
+ if (HAS_DDI(dev)) {
+ dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+ dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+ dev_priv->display.crtc_enable = haswell_crtc_enable;
+ dev_priv->display.crtc_disable = haswell_crtc_disable;
+ dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.crtc_enable = ironlake_crtc_enable;
+ dev_priv->display.crtc_disable = ironlake_crtc_disable;
+ dev_priv->display.off = ironlake_crtc_off;
+ dev_priv->display.update_plane = ironlake_update_plane;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = valleyview_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display.off = i9xx_crtc_off;
+ dev_priv->display.update_plane = i9xx_update_plane;
+ } else {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display.off = i9xx_crtc_off;
+ dev_priv->display.update_plane = i9xx_update_plane;
+ }
+
+ /* Returns the core display clock speed */
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ valleyview_get_display_clock_speed;
+ else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ dev_priv->display.get_display_clock_speed =
+ i945_get_display_clock_speed;
+ else if (IS_I915G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915_get_display_clock_speed;
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+ dev_priv->display.get_display_clock_speed =
+ i9xx_misc_get_display_clock_speed;
+ else if (IS_I915GM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915gm_get_display_clock_speed;
+ else if (IS_I865G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i865_get_display_clock_speed;
+ else if (IS_I85X(dev))
+ dev_priv->display.get_display_clock_speed =
+ i855_get_display_clock_speed;
+ else /* 852, 830 */
+ dev_priv->display.get_display_clock_speed =
+ i830_get_display_clock_speed;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_GEN5(dev)) {
+ dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_GEN6(dev)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+ dev_priv->display.write_eld = haswell_write_eld;
+ dev_priv->display.modeset_global_resources =
+ haswell_modeset_global_resources;
+ }
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.write_eld = g4x_write_eld;
+ }
+
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ dev_priv->display.queue_flip = intel_gen2_queue_flip;
+ break;
+
+ case 3:
+ dev_priv->display.queue_flip = intel_gen3_queue_flip;
+ break;
+
+ case 4:
+ case 5:
+ dev_priv->display.queue_flip = intel_gen4_queue_flip;
+ break;
+
+ case 6:
+ dev_priv->display.queue_flip = intel_gen6_queue_flip;
+ break;
+ case 7:
+ dev_priv->display.queue_flip = intel_gen7_queue_flip;
+ break;
+ }
+}
+
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times. This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+ DRM_INFO("applying pipe a force quirk\n");
+}
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+#if 0
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pch_pwm_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_device *dev);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_device *dev);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+};
+
+static struct intel_quirk intel_quirks[] = {
+ /* HP Mini needs pipe A force quirk (LP: #322104) */
+ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+ /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+ { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+ /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+ /* 830/845 need to leave pipe A & dpll A up */
+ { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pch_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+ { 0x0166, 0x1028, 0x058b, quirk_no_pch_pwm_enable },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+ struct pci_dev *d = dev->pdev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(dev);
+ }
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(dev);
+ }
+}
+#endif
+
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 sr1;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
+
+ I915_WRITE8(VGA_SR_INDEX, SR01);
+ sr1 = I915_READ8(VGA_SR_DATA);
+ I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
+ udelay(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+ intel_init_power_well(dev);
+
+ intel_prepare_ddi(dev);
+
+ intel_init_clock_gating(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_enable_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+void intel_modeset_suspend_hw(struct drm_device *dev)
+{
+ intel_suspend_hw(dev);
+}
+
+void intel_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i, j, ret;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ dev->mode_config.funcs = (void *)&intel_mode_funcs;
+
+ /* OSOL_I915 intel_init_quirks(dev); */
+
+ intel_init_pm(dev);
+
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return;
+
+ intel_init_display(dev);
+
+ if (IS_GEN2(dev)) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else if (IS_GEN3(dev)) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ }
+ dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
+
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
+ INTEL_INFO(dev)->num_pipes,
+ INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+
+ for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ intel_crtc_init(dev, i);
+ for (j = 0; j < dev_priv->num_plane; j++) {
+ ret = intel_plane_init(dev, i, j);
+ if (ret)
+ DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
+ pipe_name(i), sprite_name(i, j), ret);
+ }
+ }
+
+ intel_cpu_pll_init(dev);
+ intel_shared_dpll_init(dev);
+
+ /* Just disable it once at startup */
+ i915_disable_vga(dev);
+ intel_setup_outputs(dev);
+
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
+}
+
+static void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ connector->encoder->connectors_active = false;
+ connector->encoder->base.crtc = NULL;
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector *crt = NULL;
+ struct intel_load_detect_pipe load_detect_temp;
+
+ /* We can't just switch on pipe A; we need to set things up with a
+ * proper mode and output configuration. As a gross hack, enable pipe A
+ * by enabling the load detect pipe once. */
+ list_for_each_entry(connector, struct intel_connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+ crt = &connector->base;
+ break;
+ }
+ }
+
+ if (!crt)
+ return;
+
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
+}
+
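+/*
+ * On pre-gen4 hardware the BIOS may have cross-wired the plane -> pipe
+ * mapping; return false when the other plane is enabled and selects
+ * this crtc's pipe.
+ */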
+static bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ if (INTEL_INFO(dev)->num_pipes == 1)
+ return true;
+
+ reg = DSPCNTR(!crtc->plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) &&
+ (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+ return false;
+
+ return true;
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ reg = PIPECONF(crtc->config.cpu_transcoder);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+ /* We need to sanitize the plane -> pipe mapping first because this will
+ * disable the crtc (and hence change the state) if it is wrong. Note
+ * that gen4+ has a fixed plane -> pipe mapping. */
+ if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+ struct intel_connector *connector;
+ bool plane;
+
+ DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+ crtc->base.base.id);
+
+ /* Pipe has the wrong plane attached and the plane is active.
+ * Temporarily change the plane mapping and disable everything
+ * ... */
+ plane = crtc->plane;
+ crtc->plane = !plane;
+ dev_priv->display.crtc_disable(&crtc->base);
+ crtc->plane = plane;
+
+ /* ... and break all links. */
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder->base.crtc != &crtc->base)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+
+ WARN_ON(crtc->active);
+ crtc->base.enabled = false;
+ }
+
+ if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+ crtc->pipe == PIPE_A && !crtc->active) {
+ /* The BIOS forgot to enable pipe A; this mostly happens after
+ * resume. Force-enable the pipe to fix it; the update_dpms
+ * call below restores the pipe to the right state, but leaves
+ * the required bits on. */
+ intel_enable_pipe_a(dev);
+ }
+
+ /* Adjust the state of the output pipe according to whether we
+ * have active connectors/encoders. */
+ intel_crtc_update_dpms(&crtc->base);
+
+ if (crtc->active != crtc->base.enabled) {
+ struct intel_encoder *encoder;
+
+ /* This can happen either due to bugs in the get_hw_state
+ * functions or because the pipe is force-enabled due to the
+ * pipe A quirk. */
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+ crtc->base.base.id,
+ crtc->base.enabled ? "enabled" : "disabled",
+ crtc->active ? "enabled" : "disabled");
+
+ crtc->base.enabled = crtc->active;
+
+ /* Because we only establish the connector -> encoder ->
+ * crtc links if something is active, this means the
+ * crtc is now deactivated. Break the encoder links.
+ * Connector -> encoder links are only established when
+ * things are actually up, hence no need to break them. */
+ WARN_ON(crtc->active);
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ WARN_ON(encoder->connectors_active);
+ encoder->base.crtc = NULL;
+ }
+ }
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct intel_connector *connector;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* We need to check both for a crtc link (meaning that the
+ * encoder is active and trying to read from a pipe) and the
+ * pipe itself being active. */
+ bool has_active_crtc = encoder->base.crtc &&
+ to_intel_crtc(encoder->base.crtc)->active;
+
+ if (encoder->connectors_active && !has_active_crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+
+ /* Connector is active, but has no active pipe. This is
+ * fallout from our resume register restoring. Disable
+ * the encoder manually again. */
+ if (encoder->base.crtc) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base));
+ encoder->disable(encoder);
+ }
+
+ /* Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default. */
+ list_for_each_entry(connector, struct intel_connector,
+ &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ intel_connector_break_all_links(connector);
+ }
+ }
+ /* Enabled encoders without active connectors will be fixed in
+ * the crtc fixup. */
+}
+
+void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 vga_reg = i915_vgacntrl_reg(dev);
+
+ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ i915_disable_vga(dev);
+ }
+}
+
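+/*
+ * Read the current hardware state of crtcs, shared DPLLs, encoders and
+ * connectors into the software tracking structures.
+ */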
+static void intel_modeset_readout_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ int i;
+
+ list_for_each_entry(crtc, struct intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ (void) memset(&crtc->config, 0, sizeof(crtc->config));
+
+ crtc->active = dev_priv->display.get_pipe_config(crtc,
+ &crtc->config);
+
+ crtc->base.enabled = crtc->active;
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+ crtc->base.base.id,
+ crtc->active ? "enabled" : "disabled");
+ }
+
+ /* FIXME: Smash this into the new shared dpll infrastructure. */
+ if (HAS_DDI(dev))
+ intel_ddi_setup_hw_pll_state(dev);
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
+ pll->active = 0;
+ list_for_each_entry(crtc, struct intel_crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
+ pll->active++;
+ }
+ pll->refcount = pll->active;
+
+ DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
+ pll->name, pll->refcount, pll->on);
+ }
+
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ encoder->base.crtc = &crtc->base;
+ if (encoder->get_config)
+ encoder->get_config(encoder, &crtc->config);
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ encoder->connectors_active = false;
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+ encoder->base.base.id,
+ drm_get_encoder_name(&encoder->base),
+ encoder->base.crtc ? "enabled" : "disabled",
+ pipe);
+ }
+
+ list_for_each_entry(connector, struct intel_connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->get_hw_state(connector)) {
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+ connector->encoder->connectors_active = true;
+ connector->base.encoder = &connector->encoder->base;
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id,
+ drm_get_connector_name(&connector->base),
+ connector->base.encoder ? "enabled" : "disabled");
+ }
+}
+
+/* Scan out the current hw modeset state, sanitize it, and map it into the drm
+ * and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe;
+ struct drm_plane *plane;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ int i;
+
+ intel_modeset_readout_hw_state(dev);
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ list_for_each_entry(encoder, struct intel_encoder, &dev->mode_config.encoder_list,
+ base.head) {
+ intel_sanitize_encoder(encoder);
+ }
+
+ for_each_pipe(pipe) {
+ crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+ intel_sanitize_crtc(crtc);
+ intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
+ }
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ if (!pll->on || pll->active)
+ continue;
+
+ DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+ pll->disable(dev_priv, pll);
+ pll->on = false;
+ }
+
+ if (force_restore) {
+ /*
+ * We need to use raw interfaces for restoring state to avoid
+ * checking (bogus) intermediate states.
+ */
+ for_each_pipe(pipe) {
+ struct drm_crtc *crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+
+ __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ crtc->fb);
+ }
+ list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head)
+ intel_plane_restore(plane);
+
+ i915_redisable_vga(dev);
+ } else {
+ intel_modeset_update_staged_output_state(dev);
+ }
+
+ intel_modeset_check_state(dev);
+
+ drm_mode_config_reset(dev);
+}
+
+void intel_modeset_gem_init(struct drm_device *dev)
+{
+ intel_modeset_init_hw(dev);
+
+ intel_setup_overlay(dev);
+
+ intel_modeset_setup_hw_state(dev, false);
+}
+
+void intel_modeset_cleanup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ /* LINTED */
+ struct intel_crtc *intel_crtc;
+
+ /*
+ * Disable interrupts and polling first to avoid creating havoc.
+ * Too much stuff here (turning off rps, connectors, ...) would
+ * experience fancy races otherwise.
+ */
+ drm_irq_uninstall(dev);
+ /* OSOL_I915 cancel_work_sync(&dev_priv->hotplug_work); */
+ cancel_delayed_work(dev_priv->wq);
+ /*
+ * Due to the hpd irq storm handling the hotplug work can re-arm the
+ * poll handlers. Hence disable polling after hpd handling is shut down.
+ */
+ drm_kms_helper_poll_fini(dev);
+ mutex_lock(&dev->struct_mutex);
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ /* Skip inactive CRTCs */
+ if (!crtc->fb)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ intel_increase_pllclock(crtc);
+ }
+
+ intel_disable_fbc(dev);
+
+ intel_disable_gt_powersave(dev);
+
+ ironlake_teardown_rc6(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ /* flush any delayed tasks or pending work */
+ /* OSOL_I915 flush_scheduled_work(); */
+
+ /* destroy backlight, if any, before the connectors */
+ intel_panel_destroy_backlight(dev);
+
+ drm_mode_config_cleanup(dev);
+
+ intel_cleanup_overlay(dev);
+}
+
+/* The current intel driver doesn't take advantage of encoders;
+ * always give back the encoder for the connector.
+ */
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+{
+ return &intel_attached_encoder(connector)->base;
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ (void) drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
+
+/*
+ * set vga decode state - true == enable VGA decode
+ */
+int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+{
+ u16 gmch_ctrl;
+
+ pci_read_config_word(dev->pdev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ if (state)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+ pci_write_config_word(dev->pdev, INTEL_GMCH_CTRL, gmch_ctrl);
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+
+ u32 power_well_driver;
+
+ int num_transcoders;
+
+ struct intel_cursor_error_state {
+ u32 control;
+ u32 position;
+ u32 base;
+ u32 size;
+ } cursor[I915_MAX_PIPES];
+
+ struct intel_pipe_error_state {
+ u32 source;
+ } pipe[I915_MAX_PIPES];
+
+ struct intel_plane_error_state {
+ u32 control;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 addr;
+ u32 surface;
+ u32 tile_offset;
+ } plane[I915_MAX_PIPES];
+
+ struct intel_transcoder_error_state {
+ enum transcoder cpu_transcoder;
+
+ u32 conf;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } transcoder[4];
+
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_display_error_state *error;
+ int transcoders[4] = {
+ TRANSCODER_A,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ };
+ int i;
+
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return NULL;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ if (HAS_POWER_WELL(dev))
+ error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
+
+ for_each_pipe(i) {
+
+ if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
+ } else {
+ error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
+ error->cursor[i].position = I915_READ(CURPOS_IVB(i));
+ error->cursor[i].base = I915_READ(CURBASE_IVB(i));
+ }
+
+ error->plane[i].control = I915_READ(DSPCNTR(i));
+ error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ if (INTEL_INFO(dev)->gen <= 3) {
+ error->plane[i].size = I915_READ(DSPSIZE(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
+ }
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ error->plane[i].addr = I915_READ(DSPADDR(i));
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->plane[i].surface = I915_READ(DSPSURF(i));
+ error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ }
+
+ error->pipe[i].source = I915_READ(PIPESRC(i));
+ }
+ error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ if (HAS_DDI(dev_priv->dev))
+ error->num_transcoders++; /* Account for eDP. */
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ enum transcoder cpu_transcoder = transcoders[i];
+
+ error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+ error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ }
+
+ /* In the code above we read the registers without checking if the power
+ * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
+ * prevent the next I915_WRITE from detecting it and printing an error
+ * message. */
+ if (HAS_POWER_WELL(dev))
+ I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+ return error;
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+
+void
+intel_display_print_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error)
+{
+ int i;
+
+ if (!error)
+ return;
+
+ err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+ if (HAS_POWER_WELL(dev))
+ err_printf(m, "PWR_WELL_CTL2: %08x\n",
+ error->power_well_driver);
+ for_each_pipe(i) {
+ err_printf(m, "Pipe [%d]:\n", i);
+	err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+
+ err_printf(m, "Plane [%d]:\n", i);
+ err_printf(m, " CNTR: %08x\n", error->plane[i].control);
+ err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
+ if (INTEL_INFO(dev)->gen <= 3) {
+ err_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ err_printf(m, " POS: %08x\n", error->plane[i].pos);
+ }
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ err_printf(m, " SURF: %08x\n", error->plane[i].surface);
+ err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+ }
+
+ err_printf(m, "Cursor [%d]:\n", i);
+ err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
+ err_printf(m, " POS: %08x\n", error->cursor[i].position);
+ err_printf(m, " BASE: %08x\n", error->cursor[i].base);
+ }
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ err_printf(m, " CPU transcoder: %c\n",
+ transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
+ err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
+ err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
+ err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
+ err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
+ err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
+ err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
+	}
+}
+#endif
diff --git a/usr/src/uts/intel/io/i915/intel_dp.c b/usr/src/uts/intel/io/i915/intel_dp.c
new file mode 100644
index 0000000..a460a9d
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_dp.c
@@ -0,0 +1,3265 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2008, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_sun_i2c.h" /* OSOL_i915 */
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "drm_dp_helper.h"
+#include "drm_edid.h"
+
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * Returns true if a CPU or PCH DP output is attached to an eDP panel,
+ * false otherwise.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+}
+
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+ return intel_dig_port->base.base.dev;
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+ return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+}
+
+static void intel_dp_link_down(struct intel_dp *intel_dp);
+
+static int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
+{
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
+ max_link_bw = DP_LINK_BW_2_7;
+ break;
+ default:
+ DRM_ERROR("invalid max DP link bw val %x, using 1.62Gbps\n",
+ max_link_bw);
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
+
+/*
+ * The units on the numbers in the next two are... bizarre. Examples will
+ * make it clearer; this one parallels an example in the eDP spec.
+ *
+ * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
+ *
+ * 270000 * 1 * 8 / 10 == 216000
+ *
+ * The actual data capacity of that configuration is 2.16Gbit/s, so the
+ * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
+ * or equivalently, kilopixels per second - so for 1680x1050R it'd be
+ * 119000. At 18bpp that's 2142000 kilobits per second.
+ *
+ * Thus the strange-looking division by 10 in intel_dp_link_required, to
+ * get the result in decakilobits instead of kilobits.
+ */
+
+static int
+intel_dp_link_required(int pixel_clock, int bpp)
+{
+ return (pixel_clock * bpp + 9) / 10;
+}
+
+static int
+intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+ return (max_link_clock * max_lanes * 8) / 10;
+}
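+
+/*
+ * Worked example (illustrative only, using the numbers from the
+ * comment above): one lane at 2.7GHz gives
+ * intel_dp_max_data_rate(270000, 1) = 270000 * 1 * 8 / 10 = 216000
+ * decakilobits/s, while 1680x1050R at 18bpp needs
+ * intel_dp_link_required(119000, 18) = (119000 * 18 + 9) / 10 = 214200,
+ * which just fits.
+ */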
+
+static int
+intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ int target_clock = mode->clock;
+ int max_rate, mode_rate, max_lanes, max_link_clock;
+
+ if (is_edp(intel_dp) && fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+
+ target_clock = fixed_mode->clock;
+ }
+
+ max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+ max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ mode_rate = intel_dp_link_required(target_clock, 18);
+
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
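+
+/*
+ * Example (illustrative only): the AUX data registers are big-endian,
+ * so pack_aux() of the three bytes {0x12, 0x34, 0x56} yields
+ * 0x12345600, and unpack_aux(0x12345600, dst, 3) writes those same
+ * three bytes back into dst.
+ */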
+
+/* hrawclock is 1/4 the FSB frequency */
+static int
+intel_hrawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t clkcfg;
+
+ /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+ if (IS_VALLEYVIEW(dev))
+ return 200;
+
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100;
+ case CLKCFG_FSB_533:
+ return 133;
+ case CLKCFG_FSB_667:
+ return 166;
+ case CLKCFG_FSB_800:
+ return 200;
+ case CLKCFG_FSB_1067:
+ return 266;
+ case CLKCFG_FSB_1333:
+ return 333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400;
+ default:
+ return 133;
+ }
+}
+
+static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_stat_reg;
+
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ return (I915_READ(pp_stat_reg) & PP_ON) != 0;
+}
+
+static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_ctrl_reg;
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
+}
+
+static void
+intel_dp_check_edp(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_stat_reg, pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+		DRM_ERROR("eDP powered off while attempting aux channel communication.\n");
+ DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
+ I915_READ(pp_stat_reg),
+ I915_READ(pp_ctrl_reg));
+ }
+}
+
+static uint32_t
+intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ uint32_t status;
+ bool done;
+
+#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ if (has_aux_irq) {
+ DRM_WAIT_ON(done, &dev_priv->gmbus_wait_queue, 10, C);
+ } else {
+ done = wait_for(C, 10) == 0;
+ }
+ if (!done)
+ DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+ has_aux_irq);
+#undef C
+
+ return status;
+}
+
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ uint32_t ch_data = ch_ctl + 4;
+ int i, ret, recv_bytes;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try, precharge;
+ bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+ /* dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+
+ intel_dp_check_edp(intel_dp);
+	/* The clock divider is based on the hrawclk, and the AUX clock
+	 * should run at about 2MHz. So take the hrawclk value, divide
+	 * by 2, and use that.
+	 *
+	 * Note that PCH-attached eDP panels should use a 125MHz input
+	 * clock divider.
+	 */
+ if (IS_VALLEYVIEW(dev)) {
+ aux_clock_divider = 100;
+ } else if (intel_dig_port->port == PORT_A) {
+ if (HAS_DDI(dev))
+ aux_clock_divider = POS_DIV_ROUND_CLOSEST(
+ intel_ddi_get_cdclk_freq(dev_priv), 2000);
+ else if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ else
+ aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+ } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ /* Workaround for non-ULT HSW */
+ aux_clock_divider = 74;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ } else {
+ aux_clock_divider = intel_hrawclk(dev) / 2;
+ }
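+
+	/*
+	 * Worked example (illustrative only): with an 800MHz FSB,
+	 * intel_hrawclk() above returns 200 (MHz), so the final else
+	 * branch yields aux_clock_divider = 200 / 2 = 100, dividing the
+	 * hrawclk down to the ~2MHz AUX bit clock.
+	 */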
+
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = I915_READ_NOTRACE(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (try == 3) {
+ DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+		/* workaround: eDP and DP don't work with intel_dp_aux_wait_done */
+// status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+ for (;;) {
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ udelay(100);
+ }
+
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Timeouts occur when the device isn't connected, so they're
+ * "normal" -- don't fill the kernel log with these */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ unpack_aux(I915_READ(ch_data + i),
+ recv + i, recv_bytes - i);
+
+ ret = recv_bytes;
+out:
+ return ret;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+intel_dp_aux_native_write(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ intel_dp_check_edp(intel_dp);
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = send_bytes - 1;
+ (void) memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t byte)
+{
+ return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ intel_dp_check_edp(intel_dp);
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ (void) memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct intel_dp *intel_dp = container_of(adapter,
+ struct intel_dp,
+ adapter);
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ unsigned retry;
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ intel_dp_check_edp(intel_dp);
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+	msg[1] = (uint8_t)(address >> 8);
+	msg[2] = (uint8_t)address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (retry = 0; retry < 5; retry++) {
+ ret = intel_dp_aux_ch(intel_dp,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+
+ switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EIO;
+ case AUX_NATIVE_REPLY_DEFER:
+ udelay(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return -EIO;
+ }
+
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_i2c nack\n");
+ return -EIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_i2c defer\n");
+ udelay(100);
+ break;
+ default:
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+ return -EIO;
+ }
+ }
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
+}
+
+static int
+intel_dp_i2c_init(struct intel_dp *intel_dp,
+ struct intel_connector *intel_connector, const char *name)
+{
+ int ret;
+ DRM_DEBUG_KMS("i2c_init %s\n", name);
+ intel_dp->algo.running = false;
+ intel_dp->algo.address = 0;
+ intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
+ /* OSOL_i915: dp_priv->adapter.owner = THIS_MODULE; */
+ /* OSOL_i915: dp_priv->adapter.class = I2C_CLASS_DDC; */
+	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+ intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+ intel_dp->adapter.algo_data = &intel_dp->algo;
+ /* OSOL_i915: dp_priv->adapter.dev.parent = &intel_encoder->base.kdev; */
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ return ret;
+}
+
+static void
+intel_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config, int link_bw)
+{
+ struct drm_device *dev = encoder->base.dev;
+
+ if (IS_G4X(dev)) {
+ if (link_bw == DP_LINK_BW_1_62) {
+ pipe_config->dpll.p1 = 2;
+ pipe_config->dpll.p2 = 10;
+ pipe_config->dpll.n = 2;
+ pipe_config->dpll.m1 = 23;
+ pipe_config->dpll.m2 = 8;
+ } else {
+ pipe_config->dpll.p1 = 1;
+ pipe_config->dpll.p2 = 10;
+ pipe_config->dpll.n = 1;
+ pipe_config->dpll.m1 = 14;
+ pipe_config->dpll.m2 = 2;
+ }
+ pipe_config->clock_set = true;
+// } else if (IS_HASWELL(dev)) {
+ /* Haswell has special-purpose DP DDI clocks. */
+ } else if (HAS_PCH_SPLIT(dev)) {
+ if (link_bw == DP_LINK_BW_1_62) {
+ pipe_config->dpll.n = 1;
+ pipe_config->dpll.p1 = 2;
+ pipe_config->dpll.p2 = 10;
+ pipe_config->dpll.m1 = 12;
+ pipe_config->dpll.m2 = 9;
+ } else {
+ pipe_config->dpll.n = 2;
+ pipe_config->dpll.p1 = 1;
+ pipe_config->dpll.p2 = 10;
+ pipe_config->dpll.m1 = 14;
+ pipe_config->dpll.m2 = 8;
+ }
+ pipe_config->clock_set = true;
+// } else if (IS_VALLEYVIEW(dev)) {
+ /* FIXME: Need to figure out optimized DP clocks for vlv. */
+ }
+}
+
+bool
+intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct intel_crtc *intel_crtc = encoder->new_crtc;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ int lane_count, clock;
+ int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp, mode_rate;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ int link_avail, link_clock;
+
+ if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
+ pipe_config->has_pch_encoder = true;
+
+ pipe_config->has_dp_encoder = true;
+
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+ if (!HAS_PCH_SPLIT(dev))
+ intel_gmch_panel_fitting(intel_crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ else
+ intel_pch_panel_fitting(intel_crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ }
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
+ DRM_DEBUG_KMS("DP link computation with max lane count %i "
+ "max bw %02x pixel clock %iKHz\n",
+ max_lane_count, bws[max_clock], adjusted_mode->clock);
+
+ /* Walk through all bpp values. Luckily they're all nicely spaced with 2
+ * bpc in between. */
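+	/*
+	 * E.g. starting from pipe_bpp == 30, the loop below tries 30, 24
+	 * and 18 (10, 8 and 6 bpc) until the mode rate fits within the
+	 * available link bandwidth.
+	 */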
+ bpp = pipe_config->pipe_bpp;
+ if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
+ bpp = min(bpp, dev_priv->vbt.edp_bpp);
+
+ for (; bpp >= 6*3; bpp -= 2*3) {
+ mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
+
+ for (clock = 0; clock <= max_clock; clock++) {
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ goto found;
+ }
+ }
+ }
+ }
+
+ return false;
+
+found:
+ if (intel_dp->color_range_auto) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ else
+ intel_dp->color_range = 0;
+ }
+
+ if (intel_dp->color_range)
+ pipe_config->limited_color_range = true;
+
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = (uint8_t)lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+
+ DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ pipe_config->port_clock, bpp);
+ DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+ mode_rate, link_avail);
+
+ intel_link_compute_m_n(bpp, lane_count,
+ adjusted_mode->clock, pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+
+ return true;
+}
+
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+ memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ intel_dp->link_configuration[0] = intel_dp->link_bw;
+ intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+ /*
+ * Check for DPCD version > 1.1 and enhanced framing support
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+ intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+}
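+
+/*
+ * For example (illustrative only): a 2-lane 2.7GHz link on a DPCD 1.1+
+ * sink with enhanced framing support ends up with
+ * link_configuration[0] == DP_LINK_BW_2_7 (0x0a) and
+ * link_configuration[1] == 0x82 (lane count 2 |
+ * DP_LANE_COUNT_ENHANCED_FRAME_EN).
+ */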
+
+static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+ if (crtc->config.port_clock == 162000) {
+		/* For a long time we've carried around an ILK-DevA w/a for the
+ * 160MHz clock. If we're really unlucky, it's still required.
+ */
+ DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
+ dpa_ctl |= DP_PLL_FREQ_160MHZ;
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ } else {
+ dpa_ctl |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ }
+
+ I915_WRITE(DP_A, dpa_ctl);
+
+ POSTING_READ(DP_A);
+ udelay(500);
+}
+
+static void
+/* LINTED */
+intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+
+ /*
+ * There are four kinds of DP registers:
+ *
+ * IBX PCH
+ * SNB CPU
+ * IVB CPU
+ * CPT PCH
+ *
+ * IBX PCH and CPU are the same for almost everything,
+ * except that the CPU DP PLL is configured in this
+ * register
+ *
+ * CPT PCH is quite different, having many bits moved
+ * to the TRANS_DP_CTL register instead. That
+ * configuration happens (oddly) in ironlake_pch_enable
+ */
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+
+ /* Handle DP bits in common between all three register formats */
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
+
+ if (intel_dp->has_audio) {
+ DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+ pipe_name(crtc->pipe));
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ intel_dp_init_link_config(intel_dp);
+
+ /* Split out the IBX/CPU vs CPT settings */
+
+ if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= crtc->pipe << 29;
+ } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
+ if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+ } else {
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ }
+
+ if (port == PORT_A && !IS_VALLEYVIEW(dev))
+ ironlake_set_pll_cpu_edp(intel_dp);
+}
+
+#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
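+
+/*
+ * ironlake_wait_panel_status() below polls until
+ * (PP_STATUS & mask) == value; e.g. IDLE_ON is reached once PP_ON is
+ * set, the sequencer reports PP_SEQUENCE_NONE and the state machine
+ * sits in PP_SEQUENCE_STATE_ON_IDLE.
+ */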
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_stat_reg, pp_ctrl_reg;
+
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ I915_READ(pp_stat_reg),
+ I915_READ(pp_ctrl_reg));
+
+ if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
+ DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+ I915_READ(pp_stat_reg),
+ I915_READ(pp_ctrl_reg));
+ }
+}
+
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power on\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power off time\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power cycle\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 control;
+ u32 pp_ctrl_reg;
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+ control = I915_READ(pp_ctrl_reg);
+
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ return control;
+}
+
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+ u32 pp_stat_reg, pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+ DRM_DEBUG_KMS("Turn eDP VDD on\n");
+
+	if (intel_dp->want_panel_vdd)
+ DRM_ERROR("eDP VDD already requested on");
+
+ intel_dp->want_panel_vdd = true;
+
+ if (ironlake_edp_have_panel_vdd(intel_dp)) {
+ DRM_DEBUG_KMS("eDP VDD already on\n");
+ return;
+ }
+
+ if (!ironlake_edp_have_panel_power(intel_dp))
+ ironlake_wait_panel_power_cycle(intel_dp);
+
+ pp = ironlake_get_pp_control(intel_dp);
+ pp |= EDP_FORCE_VDD;
+
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+ /*
+ * If the panel wasn't on, delay before accessing aux channel
+ */
+ if (!ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP was not running\n");
+ msleep(intel_dp->panel_power_up_delay);
+ }
+}
+
+static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+ u32 pp_stat_reg, pp_ctrl_reg;
+
+ /* fix me: crash on mode switch */
+// WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+ pp = ironlake_get_pp_control(intel_dp);
+ pp &= ~EDP_FORCE_VDD;
+
+ pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+ msleep(intel_dp->panel_power_down_delay);
+ }
+}
+
+static void ironlake_panel_vdd_work(void *dp)
+{
+ struct intel_dp *intel_dp = (struct intel_dp *)dp;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ mutex_lock(&dev->mode_config.mutex);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
+	if (!intel_dp->want_panel_vdd)
+ DRM_ERROR("eDP VDD not forced on");
+
+ intel_dp->want_panel_vdd = false;
+
+ if (sync) {
+ ironlake_panel_vdd_off_sync(intel_dp);
+ } else {
+ /*
+ * Queue the timer to fire a long
+ * time from now (relative to the power down delay)
+ * to keep the panel power up across a sequence of operations
+ */
+ intel_dp->vdd_worktimer_id = timeout(ironlake_panel_vdd_work, (void *)intel_dp,
+ msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+ }
+}
+
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+ u32 pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP power on\n");
+
+ if (ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP power already on\n");
+ return;
+ }
+
+ ironlake_wait_panel_power_cycle(intel_dp);
+
+ pp = ironlake_get_pp_control(intel_dp);
+ if (IS_GEN5(dev)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
+
+ pp |= POWER_TARGET_ON;
+ if (!IS_GEN5(dev))
+ pp |= PANEL_POWER_RESET;
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+
+ ironlake_wait_panel_on(intel_dp);
+
+ if (IS_GEN5(dev)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+ }
+}
+
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+ u32 pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("Turn eDP power off\n");
+
+	if (!intel_dp->want_panel_vdd)
+ DRM_ERROR("Need VDD to turn off panel");
+
+ pp = ironlake_get_pp_control(intel_dp);
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+
+ intel_dp->want_panel_vdd = false;
+
+ ironlake_wait_panel_off(intel_dp);
+}
+
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
+ u32 pp;
+ u32 pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ msleep(intel_dp->backlight_on_delay);
+ pp = ironlake_get_pp_control(intel_dp);
+ pp |= EDP_BLC_ENABLE;
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+
+ intel_panel_enable_backlight(dev, pipe);
+}
+
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+ u32 pp_ctrl_reg;
+
+ if (!is_edp(intel_dp))
+ return;
+
+ intel_panel_disable_backlight(dev);
+
+ DRM_DEBUG_KMS("\n");
+ pp = ironlake_get_pp_control(intel_dp);
+ pp &= ~EDP_BLC_ENABLE;
+
+ pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ msleep(intel_dp->backlight_off_delay);
+}
+
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+ if (dpa_ctl & DP_PLL_ENABLE)
+ DRM_ERROR("dp pll on, should be off\n");
+ if (dpa_ctl & DP_PORT_EN)
+ DRM_ERROR("dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
+ POSTING_READ(DP_A);
+ udelay(200);
+}
+
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
+ dpa_ctl = I915_READ(DP_A);
+ if ((dpa_ctl & DP_PLL_ENABLE) == 0)
+ DRM_ERROR("dp pll off, should be on\n");
+ if (dpa_ctl & DP_PORT_EN)
+ DRM_ERROR("dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+	 * re-training will fail). */
+ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ POSTING_READ(DP_A);
+ udelay(200);
+}
+
+/* If the sink supports it, try to set the power state appropriately */
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+ /*
+ * When turning on, we need to retry for 1ms to give the sink
+ * time to wake up.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_write_1(intel_dp,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ msleep(1);
+ }
+ }
+}
+
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(intel_dp->output_reg);
+
+ if (!(tmp & DP_PORT_EN))
+ return false;
+
+ if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
+ *pipe = PORT_TO_PIPE(tmp);
+ } else {
+ u32 trans_sel;
+ u32 trans_dp;
+ int i;
+
+ switch (intel_dp->output_reg) {
+ case PCH_DP_B:
+ trans_sel = TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ trans_sel = TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ trans_sel = TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ return true;
+ }
+
+ for_each_pipe(i) {
+ trans_dp = I915_READ(TRANS_DP_CTL(i));
+ if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+ *pipe = i;
+ return true;
+ }
+ }
+
+ DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+ intel_dp->output_reg);
+ }
+
+ return true;
+}
+
+static void intel_dp_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ u32 tmp, flags = 0;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
+ tmp = I915_READ(intel_dp->output_reg);
+ if (tmp & DP_SYNC_HS_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & DP_SYNC_VS_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ } else {
+ tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ }
+
+ pipe_config->adjusted_mode.flags |= flags;
+}
+
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct drm_device *dev = encoder->base.dev;
+
+ /* Make sure the panel is off before trying to change the mode. But also
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ ironlake_edp_panel_off(intel_dp);
+
+	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
+ if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
+ intel_dp_link_down(intel_dp);
+}
+
+static void intel_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+ struct drm_device *dev = encoder->base.dev;
+
+ if (port == PORT_A || IS_VALLEYVIEW(dev)) {
+ intel_dp_link_down(intel_dp);
+ if (!IS_VALLEYVIEW(dev))
+ ironlake_edp_pll_off(intel_dp);
+ }
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (dp_reg & DP_PORT_EN) {
+ DRM_ERROR("DP already enabled");
+ return;
+ }
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ ironlake_edp_backlight_on(intel_dp);
+
+ if (IS_VALLEYVIEW(dev)) {
+ struct intel_digital_port *dport =
+ enc_to_dig_port(&encoder->base);
+ int channel = vlv_dport_to_channel(dport);
+
+ vlv_wait_port_ready(dev_priv, channel);
+ }
+}
+
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
+ ironlake_edp_pll_on(intel_dp);
+
+ if (IS_VALLEYVIEW(dev)) {
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+ 0x00760018);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+ 0x00400888);
+ }
+}
+
+static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int port = vlv_dport_to_channel(dport);
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ /* Program Tx lane resets to default */
+ vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+ DPIO_PCS_CLK_SOFT_RESET);
+
+ /* Fix up inter-pair skew failure */
+ vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
+ vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_read(intel_dp, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ msleep(1);
+ }
+
+ return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ return intel_dp_aux_native_read_retry(intel_dp,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
+}
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+/*
+ * These are source-specific values; current Intel hardware supports
+ * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
+ */
+
+static uint8_t
+intel_dp_voltage_max(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+
+ if (IS_VALLEYVIEW(dev))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else if (IS_GEN7(dev) && port == PORT_A)
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && port != PORT_A)
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ enum port port = dp_to_dig_port(intel_dp)->port;
+
+ if (HAS_DDI(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_VALLEYVIEW(dev)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_9_5;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else if (IS_GEN7(dev) && port == PORT_A) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ }
+}
+
+static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ unsigned long demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value;
+ uint8_t train_set = intel_dp->train_set[0];
+ int port = vlv_dport_to_channel(dport);
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ preemph_reg_value = 0x0004000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ demph_reg_value = 0x2B405555;
+ uniqtranscale_reg_value = 0x552AB83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x5548B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ demph_reg_value = 0x2B245555;
+ uniqtranscale_reg_value = 0x5560B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ demph_reg_value = 0x2B405555;
+ uniqtranscale_reg_value = 0x5598DA3A;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ preemph_reg_value = 0x0002000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x5552B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ demph_reg_value = 0x2B404848;
+ uniqtranscale_reg_value = 0x5580B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ preemph_reg_value = 0x0000000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ demph_reg_value = 0x2B305555;
+ uniqtranscale_reg_value = 0x5570B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ demph_reg_value = 0x2B2B4040;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ preemph_reg_value = 0x0006000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ demph_reg_value = 0x1B405555;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
+ vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+
+ return 0;
+}
+
+static void
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
+
+ for (lane = 0; lane < intel_dp->lane_count; lane++) {
+ uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ voltage_max = intel_dp_voltage_max(intel_dp);
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ intel_dp->train_set[lane] = v | p;
+}
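+
+/*
+ * Example (illustrative only): if the sink requests voltage swings
+ * {1, 2} and pre-emphasis {0, 1} across two lanes, the per-link maxima
+ * (2, 1) are applied to every lane; a value clamped at the source
+ * limit additionally gets DP_TRAIN_MAX_SWING_REACHED /
+ * DP_TRAIN_MAX_PRE_EMPHASIS_REACHED set so the sink stops asking for
+ * more.
+ */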
+
+static uint32_t
+intel_gen4_signal_levels(uint8_t train_set)
+{
+ uint32_t signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
+ default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+			"0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ }
+}
+
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+			"0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_hsw_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_400MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_400MV_6DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+ return DDI_BUF_EMP_400MV_9_5DB_HSW;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_600MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_600MV_3_5DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+ return DDI_BUF_EMP_600MV_6DB_HSW;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return DDI_BUF_EMP_800MV_0DB_HSW;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return DDI_BUF_EMP_800MV_3_5DB_HSW;
+ default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+			"0x%x\n", signal_levels);
+ return DDI_BUF_EMP_400MV_0DB_HSW;
+ }
+}
+
+/* Properly updates "DP" with the correct signal levels. */
+static void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ enum port port = intel_dig_port->port;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ uint32_t signal_levels, mask;
+ uint8_t train_set = intel_dp->train_set[0];
+
+ if (HAS_DDI(dev)) {
+ signal_levels = intel_hsw_signal_levels(train_set);
+ mask = DDI_BUF_EMP_MASK;
+ } else if (IS_VALLEYVIEW(dev)) {
+ signal_levels = intel_vlv_signal_levels(intel_dp);
+ mask = 0;
+ } else if (IS_GEN7(dev) && port == PORT_A) {
+ signal_levels = intel_gen7_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ } else if (IS_GEN6(dev) && port == PORT_A) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set);
+ mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ } else {
+ signal_levels = intel_gen4_signal_levels(train_set);
+ mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
+ }
+
+ DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+
+ *DP = (*DP & ~mask) | signal_levels;
+}
+
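+/*
+ * Program the requested training pattern into the source (DP_TP_CTL on
+ * DDI platforms, the port register otherwise), mirror it to the sink via
+ * DP_TRAINING_PATTERN_SET, and push the per-lane drive settings.
+ */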
+static bool
+intel_dp_set_link_train(struct intel_dp *intel_dp,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+
+ if (HAS_DDI(dev)) {
+ uint32_t temp = I915_READ(DP_TP_CTL(port));
+
+ if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+ temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+ else
+ temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ }
+ I915_WRITE(DP_TP_CTL(port), temp);
+
+ } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ }
+
+ } else {
+ dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+ switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dp_reg_value |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ DRM_ERROR("DP training pattern 3 not supported\n");
+ dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+ break;
+ }
+ }
+
+ I915_WRITE(intel_dp->output_reg, dp_reg_value);
+ POSTING_READ(intel_dp->output_reg);
+
+ intel_dp_aux_native_write_1(intel_dp,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+ DP_TRAINING_PATTERN_DISABLE) {
+ ret = intel_dp_aux_native_write(intel_dp,
+ DP_TRAINING_LANE0_SET,
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
+ return false;
+ }
+
+ return true;
+}
+
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ uint32_t val;
+
+ if (!HAS_DDI(dev))
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ /*
+ * On PORT_A we can have only eDP in SST mode. There the only reason
+ * we need to set idle transmission mode is to work around a HW issue
+ * where we enable the pipe while not in idle link-training mode.
+	 * In this case there is a requirement to wait for a minimum number of
+ * idle patterns to be sent.
+ */
+ if (port == PORT_A)
+ return;
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+ 1))
+ DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
+/* Enable corresponding port and start training pattern 1 */
+void
+intel_dp_start_link_train(struct intel_dp *intel_dp)
+{
+ struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+ struct drm_device *dev = encoder->dev;
+ int i;
+ uint8_t voltage;
+ /* LINTED */
+ bool clock_recovery = false;
+ int voltage_tries, loop_tries;
+ uint32_t DP = intel_dp->DP;
+
+ if (HAS_DDI(dev))
+ intel_ddi_prepare_link_retrain(encoder);
+
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
+
+ DP |= DP_PORT_EN;
+
+ memset(intel_dp->train_set, 0, 4);
+ voltage = 0xff;
+ voltage_tries = 0;
+ loop_tries = 0;
+ clock_recovery = false;
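+	/*
+	 * Clock recovery loop: keep adjusting the drive settings as the
+	 * sink requests until it reports clock recovery, the same voltage
+	 * has been tried five times, or all lanes have hit max swing and
+	 * the full sequence has already been restarted five times.
+	 */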
+ for (;;) {
+		/* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ intel_dp_set_signal_levels(intel_dp, &DP);
+
+ /* Set training pattern 1 */
+ if (!intel_dp_set_link_train(intel_dp, DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE))
+ break;
+
+ drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
+ break;
+ }
+
+ if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("clock recovery OK\n");
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == intel_dp->lane_count) {
+ ++loop_tries;
+ if (loop_tries == 5) {
+ DRM_DEBUG_KMS("too many full retries, give up\n");
+ break;
+ }
+ memset(intel_dp->train_set, 0, 4);
+ voltage_tries = 0;
+ continue;
+ }
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++voltage_tries;
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("too many voltage retries, give up\n");
+ break;
+ }
+ } else
+ voltage_tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_dp, link_status);
+ }
+
+ intel_dp->DP = DP;
+}
+
+void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+ bool channel_eq = false;
+ int tries, cr_tries;
+ uint32_t DP = intel_dp->DP;
+
+ /* channel equalization */
+ tries = 0;
+ cr_tries = 0;
+ channel_eq = false;
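+	/*
+	 * Channel equalization loop: resend training pattern 2 with
+	 * adjusted drive settings until the sink reports channel EQ,
+	 * falling back to a full clock recovery pass (bounded by cr_tries)
+	 * whenever clock lock is lost or five EQ attempts fail.
+	 */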
+ for (;;) {
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ if (cr_tries > 5) {
+ DRM_ERROR("failed to train DP, aborting\n");
+ intel_dp_link_down(intel_dp);
+ break;
+ }
+
+ intel_dp_set_signal_levels(intel_dp, &DP);
+
+ /* channel eq pattern */
+ if (!intel_dp_set_link_train(intel_dp, DP,
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE))
+ break;
+
+ drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+ if (!intel_dp_get_link_status(intel_dp, link_status))
+ break;
+
+ /* Make sure clock is still ok */
+ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ intel_dp_start_link_train(intel_dp);
+ cr_tries++;
+ continue;
+ }
+
+ if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times, then try clock recovery if that fails */
+ if (tries > 5) {
+ intel_dp_link_down(intel_dp);
+ intel_dp_start_link_train(intel_dp);
+ tries = 0;
+ cr_tries++;
+ continue;
+ }
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_dp, link_status);
+ ++tries;
+ }
+
+ intel_dp_set_idle_link_train(intel_dp);
+
+ intel_dp->DP = DP;
+
+ if (channel_eq)
+ DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+ intel_dp_set_link_train(intel_dp, intel_dp->DP,
+ DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+intel_dp_link_down(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ enum port port = intel_dig_port->port;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ uint32_t DP = intel_dp->DP;
+
+	/*
+	 * The DDI code has a strict mode set sequence and we should try to
+	 * respect it; otherwise we might hang the machine in many different
+	 * ways. So we really should only be disabling the port as part of a
+	 * complete crtc_disable sequence. This function is called in just two
+	 * cases in the DDI code:
+	 * - Link training failed while doing crtc_enable; in this case we
+	 *   really should respect the mode set sequence and wait for a
+	 *   crtc_disable.
+	 * - Someone turned the monitor off and intel_dp_check_link_status
+	 *   called us. We don't need to disable the whole port in this case,
+	 *   so when someone turns the monitor on again,
+	 *   intel_ddi_prepare_link_retrain will take care of redoing the
+	 *   link training.
+	 */
+ if (HAS_DDI(dev))
+ return;
+
+ if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ }
+ POSTING_READ(intel_dp->output_reg);
+
+ /* We don't really know why we're doing this */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ if (HAS_PCH_IBX(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A.
+ *
+ * Combine this with another hardware workaround:
+		 * the transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+
+ /* Changes to enable or select take place the vblank
+ * after being written.
+ */
+ if (crtc == NULL) {
+ /* We should never try to disable a port without a crtc
+ * attached. For paranoia keep the code around for a
+ * bit. */
+ POSTING_READ(intel_dp->output_reg);
+ msleep(50);
+ } else
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
+ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+ msleep(intel_dp->panel_power_down_delay);
+}
+
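+/*
+ * Cache the sink's receiver capability field in intel_dp->dpcd, plus the
+ * per-port downstream info when a DPCD 1.1+ branch device is present;
+ * returns false if the AUX transfer fails or no DPCD is found.
+ */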
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+// char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
+
+ if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
+ sizeof(intel_dp->dpcd)) == 0)
+ return false; /* aux transfer failed */
+/*
+ hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+*/
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+ return false; /* DPCD not present */
+
+ if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT))
+ return true; /* native DP sink */
+
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
+ return true; /* no per-port downstream info */
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
+ intel_dp->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS) == 0)
+ return false; /* downstream port status fetch failed */
+
+ return true;
+}
+
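+/*
+ * Read and log the sink and branch IEEE OUIs when the sink advertises
+ * OUI support; panel VDD is forced on around the AUX reads for eDP.
+ */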
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+ u8 buf[3];
+
+ if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+}
+
+static bool
+intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ int ret;
+
+ ret = intel_dp_aux_native_read_retry(intel_dp,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector, 1);
+ if (!ret)
+ return false;
+
+ return true;
+}
+
+static void
+intel_dp_handle_test_request(struct intel_dp *intel_dp)
+{
+ /* NAK by default */
+ (void) intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ * 1. Read DPCD
+ * 2. Configure link according to Receiver Capabilities
+ * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ * 4. Check link status on receipt of hot-plug interrupt
+ */
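+/*
+ * In this file those steps map roughly onto intel_dp_get_dpcd() (1),
+ * intel_dp_start_link_train()/intel_dp_complete_link_train() (2 and 3),
+ * and intel_dp_check_link_status() below (4).
+ */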
+
+void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ u8 sink_irq_vector;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_encoder->connectors_active)
+ return;
+
+ if (!intel_encoder->base.crtc)
+ return;
+
+ /* Try to read receiver status if the link appears to be up */
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
+ /* Now read the DPCD to see if it's actually running */
+ if (!intel_dp_get_dpcd(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
+ /* Try to read the source of the interrupt */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ /* Clear interrupt source */
+ (void) intel_dp_aux_native_write_1(intel_dp,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector);
+
+ if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+ if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+ DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+ }
+
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ drm_get_encoder_name(&intel_encoder->base));
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+}
+
+/* XXX this is probably wrong for multiple downstream ports */
+static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+ uint8_t *dpcd = intel_dp->dpcd;
+ bool hpd;
+ uint8_t type;
+
+ if (!intel_dp_get_dpcd(intel_dp))
+ return connector_status_disconnected;
+
+ /* if there's no downstream port, we're done */
+ if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ return connector_status_connected;
+
+ /* If we're HPD-aware, SINK_COUNT changes dynamically */
+ hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
+ if (hpd) {
+ uint8_t reg;
+ if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
+ &reg, 1))
+ return connector_status_unknown;
+ return DP_GET_SINK_COUNT(reg) ? connector_status_connected
+ : connector_status_disconnected;
+ }
+
+ /* If no HPD, poke DDC gently */
+ if (drm_probe_ddc(&intel_dp->adapter))
+ return connector_status_connected;
+
+ /* Well we tried, say unknown for unreliable port types */
+ type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+ if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
+ return connector_status_unknown;
+
+ /* Anything else is out of spec, warn and ignore */
+ DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+ironlake_dp_detect(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ enum drm_connector_status status;
+
+ /* Can't disconnect eDP */
+ if (is_edp(intel_dp)) {
+ status = intel_panel_detect(dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
+
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ return connector_status_disconnected;
+
+ return intel_dp_detect_dpcd(intel_dp);
+}
+
+static enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ uint32_t bit;
+
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ enum drm_connector_status status;
+
+ status = intel_panel_detect(dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
+
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
+ return connector_status_disconnected;
+
+ return intel_dp_detect_dpcd(intel_dp);
+}
+
+static struct edid *
+intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ struct edid *edid;
+ int size;
+
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return NULL;
+
+ size = EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1);
+ edid = kmalloc(size, GFP_KERNEL);
+ if (!edid)
+ return NULL;
+
+ memcpy(edid, intel_connector->edid, size);
+ return edid;
+ }
+
+ return drm_get_edid(connector, adapter);
+}
+
+static int
+intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return 0;
+
+ return intel_connector_update_modes(connector,
+ intel_connector->edid);
+ }
+
+ return intel_ddc_get_modes(connector, adapter);
+}
+
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status status;
+ struct edid *edid = NULL;
+
+ intel_dp->has_audio = false;
+
+ if (HAS_PCH_SPLIT(dev))
+ status = ironlake_dp_detect(intel_dp);
+ else
+ status = g4x_dp_detect(intel_dp);
+
+ if (status != connector_status_connected)
+ return status;
+
+ intel_dp_probe_oui(intel_dp);
+
+ if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
+ intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
+ } else {
+ edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ }
+ }
+
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ return connector_status_connected;
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+	/* We should parse the EDID data and find out if it has an audio sink */
+
+ ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
+ if (ret)
+ return ret;
+
+ /* if eDP has no EDID, fall back to fixed mode */
+ if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(dev,
+ intel_connector->panel.fixed_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ }
+
+ return has_audio;
+}
+
+static int
+intel_dp_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = (int)val;
+ bool has_audio;
+
+ if (i == intel_dp->force_audio)
+ return 0;
+
+ intel_dp->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_dp_detect_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (has_audio == intel_dp->has_audio)
+ return 0;
+
+ intel_dp->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_dp->color_range_auto;
+ uint32_t old_range = intel_dp->color_range;
+
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_dp->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_dp->color_range_auto = false;
+ intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (old_auto == intel_dp->color_range_auto &&
+ old_range == intel_dp->color_range)
+ return 0;
+
+ goto done;
+ }
+
+ if (is_edp(intel_dp) &&
+ property == connector->dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("scaling mode NONE not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val) {
+ /* the eDP scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = (int) val;
+
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_encoder->base.crtc)
+ intel_crtc_restore_mode(intel_encoder->base.crtc);
+
+ return 0;
+}
+
+static void
+intel_dp_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	if (intel_connector->edid != NULL && !IS_ERR(intel_connector->edid))
+		kfree(intel_connector->edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+
+ /* Can't call is_edp() since the encoder may have been destroyed
+ * already. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ intel_panel_fini(&intel_connector->panel);
+
+ drm_connector_cleanup(connector);
+ kfree(intel_connector, sizeof(struct intel_connector));
+}
+
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+// i2c_del_adapter(&intel_dp->adapter);
+ drm_encoder_cleanup(encoder);
+ if (is_edp(intel_dp)) {
+ if (intel_dp->vdd_worktimer_id != NULL) {
+ (void) untimeout(intel_dp->vdd_worktimer_id);
+ intel_dp->vdd_worktimer_id = NULL;
+ }
+ mutex_lock(&dev->mode_config.mutex);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+ kfree(intel_dig_port, sizeof(*intel_dig_port));
+}
+
+static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ .mode_set = intel_dp_mode_set,
+};
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_set_property,
+ .destroy = intel_dp_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .destroy = intel_dp_encoder_destroy,
+};
+
+static void
+intel_dp_hot_plug(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ intel_dp_check_link_status(intel_dp);
+}
+
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp *intel_dp;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ return intel_dp->output_reg;
+ }
+
+ return -1;
+}
+
+/* check the VBT to see whether the eDP is on DP-D port */
+bool intel_dpd_is_edp(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
+
+ if (p_child->dvo_port == PORT_IDPD &&
+ p_child->device_type == DEVICE_TYPE_eDP)
+ return true;
+ }
+ return false;
+}
+
+static void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+ intel_dp->color_range_auto = true;
+
+ if (is_edp(intel_dp)) {
+ drm_mode_create_scaling_mode_property(connector->dev);
+ drm_object_attach_property(
+ &connector->base,
+ connector->dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
+
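+/*
+ * Read the current panel power sequencing delays from the PP registers,
+ * take the max of those and the VBT values (falling back to the eDP spec
+ * limits when both are unset), and store the resolved delays in both
+ * intel_dp and *out for the register write-back below.
+ */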
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_power_seq cur, vbt, spec, final;
+ u32 pp_on, pp_off, pp_div, pp;
+ int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ pp_control_reg = PCH_PP_CONTROL;
+ pp_on_reg = PCH_PP_ON_DELAYS;
+ pp_off_reg = PCH_PP_OFF_DELAYS;
+ pp_div_reg = PCH_PP_DIVISOR;
+ } else {
+ pp_control_reg = PIPEA_PP_CONTROL;
+ pp_on_reg = PIPEA_PP_ON_DELAYS;
+ pp_off_reg = PIPEA_PP_OFF_DELAYS;
+ pp_div_reg = PIPEA_PP_DIVISOR;
+ }
+
+ /* Workaround: Need to write PP_CONTROL with the unlock key as
+ * the very first thing. */
+ pp = ironlake_get_pp_control(intel_dp);
+ I915_WRITE(pp_control_reg, pp);
+
+ pp_on = I915_READ(pp_on_reg);
+ pp_off = I915_READ(pp_off_reg);
+ pp_div = I915_READ(pp_div_reg);
+
+ /* Pull timing values out of registers */
+ cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+
+ cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+
+ cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+ DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+ vbt = dev_priv->vbt.edp_pps;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+	/* This one is special: it is actually in units of 100 ms, but
+	 * zero-based in the hw (so we need to add 100 ms). The sw vbt
+	 * table multiplies it by 1000 to put it in units of 100 usec,
+	 * too. */
+ spec.t11_t12 = (510 + 100) * 10;
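+	/* i.e. (illustrative) the 510 ms limit plus the 100 ms zero-base
+	 * offset gives (510 + 100) * 10 = 6100 in the 100 usec units. */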
+
+ DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
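+	/*
+	 * Worked example (illustrative): a resolved t1_t3 of 2100 in the
+	 * hw's 100 usec units becomes DIV_ROUND_UP(2100, 10) = 210 ms for
+	 * panel_power_up_delay, matching the eDP spec limit set above.
+	 */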
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ if (out)
+ *out = final;
+}
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_on, pp_off, pp_div, port_sel = 0;
+ int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+ int pp_on_reg, pp_off_reg, pp_div_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ pp_on_reg = PCH_PP_ON_DELAYS;
+ pp_off_reg = PCH_PP_OFF_DELAYS;
+ pp_div_reg = PCH_PP_DIVISOR;
+ } else {
+ pp_on_reg = PIPEA_PP_ON_DELAYS;
+ pp_off_reg = PIPEA_PP_OFF_DELAYS;
+ pp_div_reg = PIPEA_PP_DIVISOR;
+ }
+
+ /* And finally store the new values in the power sequencer. */
+ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+	/* Compute the divisor for the pp clock, simply matching the Bspec
+ * formula. */
+ pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+ << PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (IS_VALLEYVIEW(dev)) {
+ port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+ } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ if (dp_to_dig_port(intel_dp)->port == PORT_A)
+ port_sel = PANEL_POWER_PORT_DP_A;
+ else
+ port_sel = PANEL_POWER_PORT_DP_D;
+ }
+
+ pp_on |= port_sel;
+
+ I915_WRITE(pp_on_reg, pp_on);
+ I915_WRITE(pp_off_reg, pp_off);
+ I915_WRITE(pp_div_reg, pp_div);
+
+ DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ I915_READ(pp_on_reg),
+ I915_READ(pp_off_reg),
+ I915_READ(pp_div_reg));
+}
+
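+/*
+ * eDP-only connector setup: resolve the panel power sequencing delays,
+ * cache the DPCD and EDID with panel VDD forced on, pick a fixed mode
+ * from the EDID (or fall back to the VBT), and initialize the panel and
+ * backlight. Returns false if the panel looks like a ghost.
+ */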
+static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edp_power_seq power_seq = { 0 };
+ bool has_dpcd;
+ struct drm_display_mode *scan;
+ struct edid *edid;
+
+ if (!is_edp(intel_dp))
+ return true;
+
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+
+ /* Cache DPCD and EDID for edp. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ has_dpcd = intel_dp_get_dpcd(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (has_dpcd) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+ dev_priv->no_aux_handshake =
+ intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+ DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+ } else {
+ /* if this fails, presume the device is a ghost */
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ return false;
+ }
+
+ /* We now know it's not a ghost, init power sequence regs. */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ drm_edid_to_eld(connector, edid);
+ } else {
+ kfree(edid, (EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1)));
+ edid = NULL;
+ }
+ } else {
+ edid = NULL;
+ }
+ intel_connector->edid = edid;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, struct drm_display_mode, &connector->probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+
+	/* fall back to VBT if available for eDP */
+ if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
+ fixed_mode = drm_mode_duplicate(dev,
+ dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (fixed_mode)
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ intel_panel_setup_backlight(connector);
+
+ return true;
+}
+
+bool
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ const char *name = NULL;
+ int type, error;
+
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->attached_connector = intel_connector;
+
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ /*
+	 * FIXME: We need to initialize built-in panels before external panels.
+ * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
+ */
+ switch (port) {
+ case PORT_A:
+ type = DRM_MODE_CONNECTOR_eDP;
+ break;
+ case PORT_C:
+ if (IS_VALLEYVIEW(dev))
+ type = DRM_MODE_CONNECTOR_eDP;
+ break;
+ case PORT_D:
+ if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
+ type = DRM_MODE_CONNECTOR_eDP;
+ break;
+ default: /* silence GCC warning */
+ break;
+ }
+
+ /*
+ * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
+ * for DP the encoder type can be set by the caller to
+ * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
+ */
+ if (type == DRM_MODE_CONNECTOR_eDP)
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+
+ DRM_DEBUG_KMS("Adding %s connector on port %c\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ port_name(port));
+
+ (void) drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ intel_dp->vdd_worktimer_id = NULL;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ if (HAS_DDI(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
+ if (HAS_DDI(dev)) {
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
+ break;
+ case PORT_B:
+ intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
+ break;
+ case PORT_C:
+ intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
+ break;
+ case PORT_D:
+ intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ /* Set up the DDC bus. */
+ switch (port) {
+ case PORT_A:
+ intel_encoder->hpd_pin = HPD_PORT_A;
+ name = "DPDDC-A";
+ break;
+ case PORT_B:
+ intel_encoder->hpd_pin = HPD_PORT_B;
+ name = "DPDDC-B";
+ break;
+ case PORT_C:
+ intel_encoder->hpd_pin = HPD_PORT_C;
+ name = "DPDDC-C";
+ break;
+ case PORT_D:
+ intel_encoder->hpd_pin = HPD_PORT_D;
+ name = "DPDDC-D";
+ break;
+ default:
+ BUG();
+ }
+
+ error = intel_dp_i2c_init(intel_dp, intel_connector, name);
+	if (error)
+ DRM_ERROR("intel_dp_i2c_init failed with error %d for port %c\n",
+ error, port_name(port));
+
+ if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+ //i2c_del_adapter(&intel_dp->adapter);
+ if (is_edp(intel_dp)) {
+ if (intel_dp->vdd_worktimer_id != NULL) {
+ (void) untimeout(intel_dp->vdd_worktimer_id);
+ intel_dp->vdd_worktimer_id = NULL;
+ }
+ mutex_lock(&dev->mode_config.mutex);
+ ironlake_panel_vdd_off_sync(intel_dp);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+ drm_connector_cleanup(connector);
+ return false;
+ }
+
+ intel_dp_add_properties(intel_dp, connector);
+
+	/* For the G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+
+ return true;
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port, sizeof(struct intel_digital_port));
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+ encoder = &intel_encoder->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+ intel_encoder->compute_config = intel_dp_compute_config;
+ intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+ intel_encoder->get_config = intel_dp_get_config;
+ if (IS_VALLEYVIEW(dev))
+ intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+
+ intel_dig_port->port = port;
+ intel_dig_port->dp.output_reg = output_reg;
+
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+ intel_encoder->hot_plug = intel_dp_hot_plug;
+
+ if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port, sizeof(struct intel_digital_port));
+ kfree(intel_connector, sizeof (struct intel_connector));
+ }
+}
diff --git a/usr/src/uts/intel/io/i915/intel_drv.h b/usr/src/uts/intel/io/i915/intel_drv.h
new file mode 100644
index 0000000..0faec2b
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_drv.h
@@ -0,0 +1,806 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "drm_crtc.h"
+#include "drm_sun_i2c.h" /* OSOL_i915 */
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "drm_dp_helper.h"
+
+#define MSLEEP(x) do { \
+ if (in_dbg_master()) \
+ mdelay(x); \
+ else \
+ msleep(x); \
+} while (0)
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtcs in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* These are the outputs integrated into the chip;
+   external chips are attached via DVO or SDVO output. */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_DISPLAYPORT 7
+#define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+struct intel_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_i915_gem_object *obj;
+};
+
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
+ struct drm_display_mode *our_mode;
+};
+
+struct intel_encoder {
+ struct drm_encoder base;
+ /*
+ * The new crtc this encoder will be driven from. Only differs from
+ * base->crtc while a modeset is in progress.
+ */
+ struct intel_crtc *new_crtc;
+
+ int type;
+ int type_size;
+ /*
+ * Intel hw has only one MUX where encoders could be clone, hence a
+ * simple flag is enough to compute the possible_clones mask.
+ */
+ bool cloneable;
+ bool connectors_active;
+ void (*hot_plug)(struct intel_encoder *);
+ bool (*compute_config)(struct intel_encoder *,
+ struct intel_crtc_config *);
+ void (*pre_pll_enable)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
+ void (*enable)(struct intel_encoder *);
+ void (*mode_set)(struct intel_encoder *intel_encoder);
+ void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
+ /* Read out the current hw state of this connector, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the pipe
+ * it is connected to in the pipe parameter. */
+ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
+ /* Reconstructs the equivalent mode flags for the current hardware
+ * state. This must be called _after_ display->get_pipe_config has
+ * pre-filled the pipe config. Note that intel_encoder->base.crtc must
+ * be set correctly before calling this function. */
+ void (*get_config)(struct intel_encoder *,
+ struct intel_crtc_config *pipe_config);
+ int crtc_mask;
+ enum hpd_pin hpd_pin;
+};
+
+struct intel_panel {
+ struct drm_display_mode *fixed_mode;
+ int fitting_mode;
+};
+
+struct intel_connector {
+ struct drm_connector base;
+ /*
+ * The fixed encoder this connector is connected to.
+ */
+ struct intel_encoder *encoder;
+
+ /*
+	 * The new encoder this connector will be driven by. Only differs from
+ * encoder while a modeset is in progress.
+ */
+ struct intel_encoder *new_encoder;
+
+ /* Reads out the current hw, returning true if the connector is enabled
+ * and active (i.e. dpms ON state). */
+ bool (*get_hw_state)(struct intel_connector *);
+
+ /* Panel info for eDP and LVDS */
+ struct intel_panel panel;
+
+ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ struct edid *edid;
+
+	/* Since POLL and HPD connectors may use the same HPD line, keep the native
+	 * state of connector->polled in case hotplug storm detection changes it. */
+ u8 polled;
+};
+
+typedef struct dpll {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+} intel_clock_t;
+
+struct intel_crtc_config {
+ /**
+ * quirks - bitfield with hw state readout quirks
+ *
+ * For various reasons the hw state readout code might not be able to
+ * completely faithfully read out the current state. These cases are
+ * tracked with quirk flags so that fastboot and state checker can act
+ * accordingly.
+ */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+ unsigned long quirks;
+
+ struct drm_display_mode requested_mode;
+ struct drm_display_mode adjusted_mode;
+ /* This flag must be set by the encoder's compute_config callback if it
+ * changes the crtc timings in the mode to prevent the crtc fixup from
+ * overwriting them. Currently only lvds needs that. */
+ bool timings_set;
+ /* Whether to set up the PCH/FDI. Note that we never allow sharing
+ * between pch encoders and cpu encoders. */
+ bool has_pch_encoder;
+
+ /* CPU Transcoder for the pipe. Currently this can only differ from the
+ * pipe on Haswell (where we have a special eDP transcoder). */
+ enum transcoder cpu_transcoder;
+
+ /*
+	 * Use reduced/limited/broadcast rgb range, compressing from the full
+ * range fed into the crtcs.
+ */
+ bool limited_color_range;
+
+	/* DP has a bunch of special cases, unfortunately, so mark the pipe
+ * accordingly. */
+ bool has_dp_encoder;
+
+ /*
+ * Enable dithering, used when the selected pipe bpp doesn't match the
+ * plane bpp.
+ */
+ bool dither;
+
+ /* Controls for the clock computation, to override various stages. */
+ bool clock_set;
+
+	/* SDVO TV has a bunch of special cases. To make multifunction encoders
+	 * work correctly, we need to track this at runtime. */
+ bool sdvo_tv_clock;
+
+ /*
+ * crtc bandwidth limit, don't increase pipe bpp or clock if not really
+ * required. This is set in the 2nd loop of calling encoder's
+ * ->compute_config if the first pick doesn't work out.
+ */
+ bool bw_constrained;
+
+ /* Settings for the intel dpll used on pretty much everything but
+ * haswell. */
+ struct dpll dpll;
+
+ /* Selected dpll when shared or DPLL_ID_PRIVATE. */
+ enum intel_dpll_id shared_dpll;
+
+ /* Actual register state of the dpll, for shared dpll cross-checking. */
+ struct intel_dpll_hw_state dpll_hw_state;
+
+ int pipe_bpp;
+ struct intel_link_m_n dp_m_n;
+
+ /*
+	 * Frequency the dpll for the port should run at. Differs from the
+ * adjusted dotclock e.g. for DP or 12bpc hdmi mode.
+ */
+ int port_clock;
+
+ /* Used by SDVO (and if we ever fix it, HDMI). */
+ unsigned pixel_multiplier;
+
+ /* Panel fitter controls for gen2-gen4 + VLV */
+ struct {
+ u32 control;
+ u32 pgm_ratios;
+ u32 lvds_border_bits;
+ } gmch_pfit;
+
+ /* Panel fitter placement and size for Ironlake+ */
+ struct {
+ u32 pos;
+ u32 size;
+ } pch_pfit;
+
+ /* FDI configuration, only valid if has_pch_encoder is set. */
+ int fdi_lanes;
+ struct intel_link_m_n fdi_m_n;
+
+ bool ips_enabled;
+};
+struct intel_crtc {
+ struct drm_crtc base;
+ enum pipe pipe;
+ enum plane plane;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ /*
+	 * Whether the crtc and the connected output pipeline are active. Implies
+ * that crtc->enabled is set, i.e. the current mode configuration has
+ * some outputs connected to this crtc.
+ */
+ bool active;
+ bool eld_vld;
+ bool primary_disabled; /* is the crtc obscured by a plane? */
+ bool lowfreq_avail;
+ struct intel_overlay *overlay;
+ struct intel_unpin_work *unpin_work;
+
+ atomic_t unpin_work_count;
+
+	/* Display surface base address adjustment for pageflips. Note that on
+ * gen4+ this only adjusts up to a tile, offsets within a tile are
+ * handled in the hw itself (with the TILEOFF register). */
+ unsigned long dspaddr_offset;
+
+ struct drm_i915_gem_object *cursor_bo;
+ uint32_t cursor_addr;
+ int16_t cursor_x, cursor_y;
+ int16_t cursor_width, cursor_height;
+ bool cursor_visible;
+
+ struct intel_crtc_config config;
+
+ uint32_t ddi_pll_sel;
+
+ /* reset counter value when the last flip was submitted */
+ unsigned int reset_counter;
+
+ /* Access to these should be protected by dev_priv->irq_lock. */
+ bool cpu_fifo_underrun_disabled;
+ bool pch_fifo_underrun_disabled;
+};
+
+struct intel_plane {
+ struct drm_plane base;
+ int plane;
+ enum pipe pipe;
+ struct drm_i915_gem_object *obj;
+ bool can_scale;
+ int max_downscale;
+ u32 lut_r[1024], lut_g[1024], lut_b[1024];
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
+
+ /* Since we need to change the watermarks before/after
+ * enabling/disabling the planes, we need to store the parameters here
+ * as the other pieces of the struct may not reflect the values we want
+ * for the watermark calculations. Currently only Haswell uses this.
+ */
+ struct {
+ bool enable;
+ uint8_t bytes_per_pixel;
+ uint32_t horiz_pixels;
+ } wm;
+
+ void (*update_plane)(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ int x, int y,
+ uint32_t src_w, uint32_t src_h);
+ void (*disable_plane)(struct drm_plane *plane);
+ int (*update_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+ void (*get_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+};
+
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+ int is_desktop;
+ int is_ddr3;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
+};
+
+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+
+#define DIP_HEADER_SIZE 5
+
+#define DIP_TYPE_AVI 0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI 13
+#define DIP_AVI_PR_1 0
+#define DIP_AVI_PR_2 1
+#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
+
+#define DIP_TYPE_SPD 0x83
+#define DIP_VERSION_SPD 0x1
+#define DIP_LEN_SPD 25
+#define DIP_SPD_UNKNOWN 0
+#define DIP_SPD_DSTB 0x1
+#define DIP_SPD_DVDP 0x2
+#define DIP_SPD_DVHS 0x3
+#define DIP_SPD_HDDVR 0x4
+#define DIP_SPD_DVC 0x5
+#define DIP_SPD_DSC 0x6
+#define DIP_SPD_VCD 0x7
+#define DIP_SPD_GAME 0x8
+#define DIP_SPD_PC 0x9
+#define DIP_SPD_BD 0xa
+#define DIP_SPD_SCD 0xb
+
+struct dip_infoframe {
+ uint8_t type; /* HB0 */
+ uint8_t ver; /* HB1 */
+ uint8_t len; /* HB2 - body len, not including checksum */
+ uint8_t ecc; /* Header ECC */
+ uint8_t checksum; /* PB0 */
+ union {
+ struct {
+ /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
+ uint8_t Y_A_B_S;
+ /* PB2 - C 7:6, M 5:4, R 3:0 */
+ uint8_t C_M_R;
+ /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
+ uint8_t ITC_EC_Q_SC;
+ /* PB4 - VIC 6:0 */
+ uint8_t VIC;
+ /* PB5 - PR 3:0 */
+ uint8_t YQ_CN_PR;
+ /* PB6 to PB13 */
+ uint16_t top_bar_end;
+ uint16_t bottom_bar_start;
+ uint16_t left_bar_end;
+ uint16_t right_bar_start;
+ } avi;
+ struct {
+ uint8_t vn[8];
+ uint8_t pd[16];
+ uint8_t sdi;
+ } spd;
+ uint8_t payload[27];
+ } __attribute__ ((packed)) body;
+} __attribute__((packed));
+
+struct intel_hdmi {
+ u32 hdmi_reg;
+ int ddc_bus;
+ uint32_t color_range;
+ bool color_range_auto;
+ bool has_hdmi_sink;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ bool rgb_quant_range_selectable;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
+ void (*set_infoframes)(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
+};
+
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp {
+ uint32_t output_reg;
+ uint32_t aux_ch_ctl_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ uint32_t color_range;
+ bool color_range_auto;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+ uint8_t train_set[4];
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ timeout_id_t vdd_worktimer_id;
+ bool want_panel_vdd;
+ struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+ struct intel_encoder base;
+ enum port port;
+ u32 saved_port_bits;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
+};
+
+static inline int
+vlv_dport_to_channel(struct intel_digital_port *dport)
+{
+ switch (dport->port) {
+ case PORT_B:
+ return (0);
+ case PORT_C:
+ return (1);
+ default:
+ BUG();
+ }
+ return (0);
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->plane_to_crtc_mapping[plane];
+}
+
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_crtc *crtc;
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+ atomic_t pending;
+#define INTEL_FLIP_INACTIVE 0
+#define INTEL_FLIP_PENDING 1
+#define INTEL_FLIP_COMPLETE 2
+ bool enable_stall_check;
+};
+
+struct intel_fbc_work {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ unsigned long interval;
+};
+
+int intel_pch_rawclk(struct drm_device *dev);
+
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+
+extern void intel_attach_force_audio_property(struct drm_connector *connector);
+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
+extern void intel_crt_init(struct drm_device *dev);
+extern void intel_hdmi_init(struct drm_device *dev,
+ int hdmi_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
+ bool is_sdvob);
+extern void intel_dvo_init(struct drm_device *dev);
+extern void intel_tv_init(struct drm_device *dev);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring);
+extern void intel_mark_idle(struct drm_device *dev);
+extern void intel_lvds_init(struct drm_device *dev);
+extern bool intel_is_dual_link_lvds(struct drm_device *dev);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+ enum port port);
+extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane);
+
+/* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
+extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode);
+extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode);
+extern void intel_panel_set_backlight(struct drm_device *dev,
+ u32 level, u32 max);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
+extern void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe);
+extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern void intel_panel_destroy_backlight(struct drm_device *dev);
+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+struct intel_set_config {
+ struct drm_encoder **save_connector_encoders;
+ struct drm_crtc **save_encoder_crtcs;
+
+ bool fb_changed;
+ bool mode_changed;
+};
+
+extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
+extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
+extern void intel_plane_restore(struct drm_plane *plane);
+extern void intel_plane_disable(struct drm_plane *plane);
+
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+ return to_intel_connector(connector)->encoder;
+}
+
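+/*
+ * base.base walks two levels of embedding: the drm_encoder inside the
+ * intel_encoder that intel_digital_port itself embeds.
+ */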
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->dp;
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+ return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+ return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port);
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+
+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+int intel_get_pipe_from_crtc_id(DRM_IOCTL_ARGS);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+
+struct intel_load_detect_pipe {
+ struct drm_framebuffer *release_fb;
+ bool load_detect_temp;
+ int dpms_mode;
+};
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old);
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old);
+
+extern void intelfb_restore(void);
+extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
+extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+
+extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_i915_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_put_image(DRM_IOCTL_ARGS);
+extern int intel_overlay_attrs(DRM_IOCTL_ARGS);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
+extern void intel_fb_restore_mode(struct drm_device *dev);
+
+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_suspend_hw(struct drm_device *dev);
+extern void intel_write_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
+
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void intel_update_watermarks(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width,
+ int pixel_size, bool enable);
+
+extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+ unsigned int tiling_mode,
+ unsigned int bpp,
+ unsigned int pitch);
+
+extern int intel_sprite_set_colorkey(DRM_IOCTL_ARGS);
+extern int intel_sprite_get_colorkey(DRM_IOCTL_ARGS);
+
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+/* Power well */
+extern int i915_init_power_well(struct drm_device *dev);
+extern void i915_remove_power_well(struct drm_device *dev);
+
+extern bool intel_display_power_enabled(struct drm_device *dev,
+ enum intel_display_power_domain domain);
+extern void intel_init_power_well(struct drm_device *dev);
+extern void intel_set_power_well(struct drm_device *dev, bool enable);
+extern void intel_enable_gt_powersave(struct drm_device *dev);
+extern void intel_disable_gt_powersave(struct drm_device *dev);
+extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
+extern void ironlake_teardown_rc6(struct drm_device *dev);
+
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+
+extern void intel_display_handle_reset(struct drm_device *dev);
+extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable);
+extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
+ bool enable);
+
+#endif /* __INTEL_DRV_H__ */
diff --git a/usr/src/uts/intel/io/i915/intel_dvo.c b/usr/src/uts/intel/io/i915/intel_dvo.c
new file mode 100644
index 0000000..7003cbb
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_dvo.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_sun_i2c.h" /* OSOL_i915 */
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "dvo.h"
+
+#define SIL164_ADDR 0x38
+#define CH7xxx_ADDR 0x76
+#define TFP410_ADDR 0x38
+#define NS2501_ADDR 0x38
+
+static const struct intel_dvo_device intel_dvo_devices[] = {
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "sil164",
+ .dvo_reg = DVOC,
+ .slave_addr = SIL164_ADDR,
+ .dev_ops = &sil164_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ch7xxx",
+ .dvo_reg = DVOC,
+ .slave_addr = CH7xxx_ADDR,
+ .dev_ops = &ch7xxx_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ch7xxx",
+ .dvo_reg = DVOC,
+ .slave_addr = 0x75, /* For some ch7010 */
+ .dev_ops = &ch7xxx_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ivch",
+ .dvo_reg = DVOA,
+ .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+ .dev_ops = &ivch_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "tfp410",
+ .dvo_reg = DVOC,
+ .slave_addr = TFP410_ADDR,
+ .dev_ops = &tfp410_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ch7017",
+ .dvo_reg = DVOC,
+ .slave_addr = 0x75,
+ .gpio = GMBUS_PORT_DPB,
+ .dev_ops = &ch7017_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ns2501",
+ .dvo_reg = DVOC,
+ .slave_addr = NS2501_ADDR,
+ .dev_ops = &ns2501_ops,
+ }
+};
+
+struct intel_dvo {
+ struct intel_encoder base;
+
+ struct intel_dvo_device dev;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dvo, base.base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_dvo, base);
+}
+
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+ return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_dvo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 tmp, flags = 0;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ if (tmp & DVO_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (tmp & DVO_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ pipe_config->adjusted_mode.flags |= flags;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+ I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+/* Special dpms function to support cloning between dvo/sdvo/crt. */
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_crtc *crtc;
+
+ /* dvo supports only 2 dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_dvo->base.base.crtc;
+ if (!crtc) {
+ intel_dvo->base.connectors_active = false;
+ return;
+ }
+
+ /* We call connector dpms manually below in case pipe dpms doesn't
+ * change due to cloning. */
+ if (mode == DRM_MODE_DPMS_ON) {
+ intel_dvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+ } else {
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+ intel_dvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
+ }
+
+ intel_modeset_check_state(connector->dev);
+}
+
+static int intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* XXX: Validate clock range */
+
+ if (intel_dvo->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
+}
+
+static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+	/* If we have timings from the BIOS for the panel, put them into
+	 * the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (intel_dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
+ C(hdisplay);
+ C(hsync_start);
+ C(hsync_end);
+ C(htotal);
+ C(vdisplay);
+ C(vsync_start);
+ C(vsync_end);
+ C(vtotal);
+ C(clock);
+#undef C
+ }
+
+ if (intel_dvo->dev.dev_ops->mode_fixup)
+ return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+
+ return true;
+}
+
+static void intel_dvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ int pipe = intel_crtc->pipe;
+ u32 dvo_val;
+ u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
+ int dpll_reg = DPLL(pipe);
+
+ switch (dvo_reg) {
+ case DVOA:
+ default:
+ dvo_srcdim_reg = DVOA_SRCDIM;
+ break;
+ case DVOB:
+ dvo_srcdim_reg = DVOB_SRCDIM;
+ break;
+ case DVOC:
+ dvo_srcdim_reg = DVOC_SRCDIM;
+ break;
+ }
+
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+
+ /* Save the data order, since I don't know what it should be set to. */
+ dvo_val = I915_READ(dvo_reg) &
+ (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+ dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
+ DVO_BLANK_ACTIVE_HIGH;
+
+ if (pipe == 1)
+ dvo_val |= DVO_PIPE_B_SELECT;
+ dvo_val |= DVO_PIPE_STALL;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
+
+ I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
+
+ /*I915_WRITE(DVOB_SRCDIM,
+ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+ (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
+ I915_WRITE(dvo_srcdim_reg,
+ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+ (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+ /*I915_WRITE(DVOB, dvo_val);*/
+ I915_WRITE(dvo_reg, dvo_val);
+}
+
+/**
+ * Detect the output connection on our DVO device.
+ *
+ * Delegates to the underlying DVO chip driver's detect hook.
+ */
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+}
+
+static int intel_dvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ /* We should probably have an i2c driver get_modes function for those
+ * devices which will have a fixed set of modes determined by the chip
+ * (TV-out, for example), but for now with just TMDS and LVDS,
+ * that's not the case.
+ */
+ (void) intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
+ if (!list_empty(&connector->probed_modes))
+ return 1;
+
+ if (intel_dvo->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void intel_dvo_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
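+	/* OSOL_i915: kfree() in this port takes an explicit size,
+	 * kmem_free()-style. */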
+ kfree(connector, sizeof(struct intel_connector));
+}
+
+static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
+ .mode_fixup = intel_dvo_mode_fixup,
+ .mode_set = intel_dvo_mode_set,
+};
+
+static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ .dpms = intel_dvo_dpms,
+ .detect = intel_dvo_detect,
+ .destroy = intel_dvo_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+ .mode_valid = intel_dvo_mode_valid,
+ .get_modes = intel_dvo_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+ if (intel_dvo->dev.dev_ops->destroy)
+ intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+ kfree(intel_dvo->panel_fixed_mode, sizeof(*intel_dvo->panel_fixed_mode));
+
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
+ .destroy = intel_dvo_enc_destroy,
+};
+
+/**
+ * Attempts to get a fixed panel timing for LVDS (currently only the i830).
+ *
+ * Other chips with DVO LVDS will need to extend this to deal with the LVDS
+ * chip being on DVOB/C and having multiple pipes.
+ */
+static struct drm_display_mode *
+intel_dvo_get_current_mode(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+ struct drm_display_mode *mode = NULL;
+
+ /* If the DVO port is active, that'll be the LVDS, so we can pull out
+ * its timings to get how the BIOS set up the panel.
+ */
+ if (dvo_val & DVO_ENABLE) {
+ struct drm_crtc *crtc;
+ int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+ if (crtc) {
+ mode = intel_crtc_mode_get(dev, crtc);
+ if (mode) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ }
+ }
+ }
+
+ return mode;
+}
+
+void intel_dvo_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_dvo *intel_dvo;
+ struct intel_connector *intel_connector;
+ int i;
+ int encoder_type = DRM_MODE_ENCODER_NONE;
+
+ intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+ if (!intel_dvo)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dvo, sizeof(*intel_dvo));
+ return;
+ }
+
+ intel_encoder = &intel_dvo->base;
+ (void) drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type);
+
+ intel_encoder->disable = intel_disable_dvo;
+ intel_encoder->enable = intel_enable_dvo;
+ intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+ intel_encoder->get_config = intel_dvo_get_config;
+ intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
+ /* Now, try to find a controller */
+ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ struct drm_connector *connector = &intel_connector->base;
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
+ int gpio;
+
+ /* Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+ * in the spec.
+ */
+ if (intel_gmbus_is_port_valid(dvo->gpio))
+ gpio = dvo->gpio;
+ else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+ gpio = GMBUS_PORT_SSC;
+ else
+ gpio = GMBUS_PORT_DPB;
+
+ /* Set up the I2C bus necessary for the chip we're probing.
+ * It appears that everything is on GPIOE except for panels
+ * on i830 laptops, which are on GPIOB (DVOA).
+ */
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+
+ intel_dvo->dev = *dvo;
+ if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+ continue;
+
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->type_size = sizeof(struct intel_dvo);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ intel_encoder->cloneable = true;
+ (void) drm_connector_init(dev, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII);
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case INTEL_DVO_CHIP_LVDS:
+ intel_encoder->cloneable = false;
+ (void) drm_connector_init(dev, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ break;
+ }
+
+ drm_connector_helper_add(connector,
+ &intel_dvo_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_encoder_helper_add(&intel_encoder->base,
+ &intel_dvo_helper_funcs);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+ /* For our LVDS chipsets, we should hopefully be able
+ * to dig the fixed panel mode out of the BIOS data.
+ * However, it's in a different format from the BIOS
+ * data on chipsets with integrated LVDS (stored in AIM
+ * headers, likely), so for now, just get the current
+ * mode being output through DVO.
+ */
+ intel_dvo->panel_fixed_mode =
+ intel_dvo_get_current_mode(connector);
+ intel_dvo->panel_wants_dither = true;
+ }
+
+ /* OSOL_i915: drm_sysfs_connector_add(connector); */
+ return;
+ }
+
+ drm_encoder_cleanup(&intel_encoder->base);
+ kfree(intel_dvo, sizeof(*intel_dvo));
+ kfree(intel_connector, sizeof(*intel_connector));
+}
diff --git a/usr/src/uts/intel/io/i915/intel_fb.c b/usr/src/uts/intel/io/i915/intel_fb.c
new file mode 100644
index 0000000..5eb89ca
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_fb.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013, Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static int intelfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+ struct drm_device *dev = ifbdev->helper.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_i915_gem_object *obj;
+ int size, ret;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ (void) memset(&mode_cmd, 0, sizeof(struct drm_mode_fb_cmd2));
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
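+	/* Stride is width times whole bytes per pixel ((bpp + 7) / 8),
+	 * padded out to the 64-byte alignment linear scanout requires.
+	 */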
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
+ 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ obj = dev_priv->fbcon_obj;
+
+ if (!obj) {
+		DRM_ERROR("There is no framebuffer for console\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+ if (ret)
+ goto out_unpin;
+
+ fb = &ifbdev->ifb.base;
+
+ ifbdev->helper.fb = fb;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(obj);
+out_unref:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+out:
+ return ret;
+}
+
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intelfb_create,
+};
+
+static void intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
+{
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_unregister_private(&ifb->base);
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj) {
+ drm_gem_object_unreference_unlocked(&ifb->obj->base);
+ ifb->obj = NULL;
+ }
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return -ENOMEM;
+
+ dev_priv->fbdev = ifbdev;
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, &ifbdev->helper,
+ INTEL_INFO(dev)->num_pipes,
+ INTELFB_CONN_LIMIT);
+ if (ret) {
+ kfree(ifbdev, sizeof(struct intel_fbdev));
+ return ret;
+ }
+
+ (void) drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+ return 0;
+}
+
+void intel_fbdev_initial_config(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Due to peculiar init order wrt hpd handling this is separate. */
+ (void) drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 16);
+}
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev, sizeof(struct intel_fbdev));
+ dev_priv->fbdev = NULL;
+}
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->fbdev != NULL)
+ (void) drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
+
+void intel_fb_restore_mode(struct drm_device *dev)
+{
+ int ret;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
+ if (ret)
+ DRM_DEBUG("failed to restore crtc mode\n");
+
+ drm_modeset_unlock_all(dev);
+}
diff --git a/usr/src/uts/intel/io/i915/intel_hdmi.c b/usr/src/uts/intel/io/i915/intel_hdmi.c
new file mode 100644
index 0000000..5cafe9a
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_hdmi.c
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2006-2009, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "drm_sun_i2c.h" /* OSOL_i915 */
+
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+ return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
+static void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t enabled_bits;
+
+ enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+ if (I915_READ(intel_hdmi->hdmi_reg) & enabled_bits)
+ DRM_ERROR("HDMI port enabled, expecting disabled\n");
+}
+
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port =
+ container_of(encoder, struct intel_digital_port, base.base);
+ return &intel_dig_port->hdmi;
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+ return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+}
+
+void intel_dip_infoframe_csum(struct dip_infoframe *frame)
+{
+ uint8_t *data = (uint8_t *)frame;
+ uint8_t sum = 0;
+ unsigned i;
+
+ frame->checksum = 0;
+ frame->ecc = 0;
+
+ for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
+ sum += data[i];
+
+ frame->checksum = 0x100 - sum;
+}
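+
+/*
+ * The checksum is chosen so that all header and payload bytes of the
+ * infoframe, including the checksum itself, sum to zero modulo 256.
+ * An illustrative check (not part of the driver):
+ *
+ *	uint8_t sum = 0;
+ *	for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
+ *		sum += ((uint8_t *)frame)[i];
+ *	ASSERT(sum == 0);
+ */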
+
+static u32 g4x_infoframe_index(struct dip_infoframe *frame)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_SELECT_AVI;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_SELECT_SPD;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+
+static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+
+static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI_HSW;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD_HSW;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
+ enum transcoder cpu_transcoder)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
+ case DIP_TYPE_SPD:
+ return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+
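+/*
+ * The g4x/ibx/cpt/vlv infoframe writers below all follow the same
+ * sequence: disable the DIP being updated, select it in the control
+ * register, write the frame as 32-bit words, zero out the remaining
+ * data bytes so the hardware computes ECC over a fully known buffer,
+ * then re-enable the DIP with its frequency set to once per VSync.
+ */
+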
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ /* LINTED */
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(VIDEO_DIP_CTL);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+
+ if (!(val & VIDEO_DIP_ENABLE))
+ DRM_ERROR("Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, val);
+
+// mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
+// mmiowb();
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(VIDEO_DIP_CTL, val);
+ POSTING_READ(VIDEO_DIP_CTL);
+}
+
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ /* LINTED */
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ if (!(val & VIDEO_DIP_ENABLE))
+ DRM_ERROR("Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+
+ I915_WRITE(reg, val);
+
+// mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+// mmiowb();
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ /* LINTED */
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ if (!(val & VIDEO_DIP_ENABLE))
+ DRM_ERROR("Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ /* The DIP control register spec says that we need to update the AVI
+ * infoframe without clearing its enable bit */
+ if (frame->type != DIP_TYPE_AVI)
+ val &= ~g4x_infoframe_enable(frame);
+
+ I915_WRITE(reg, val);
+
+// mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+// mmiowb();
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ /* LINTED */
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ if (!(val & VIDEO_DIP_ENABLE))
+ DRM_ERROR("Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+
+ I915_WRITE(reg, val);
+
+// mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+// mmiowb();
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ /* LINTED */
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
+ unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(ctl_reg);
+
+ if (data_reg == 0)
+ return;
+
+ val &= ~hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+
+// mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(data_reg + i, *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
+// mmiowb();
+
+ val |= hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+ POSTING_READ(ctl_reg);
+}
+
+static void intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_dip_infoframe_csum(frame);
+ intel_hdmi->write_infoframe(encoder, frame);
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
+ if (intel_hdmi->rgb_quant_range_selectable) {
+ if (intel_crtc->config.limited_color_range)
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ else
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ }
+
+ avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
+
+ intel_set_infoframe(encoder, &avi_if);
+}
+
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe spd_if;
+
+ memset(&spd_if, 0, sizeof(spd_if));
+ spd_if.type = DIP_TYPE_SPD;
+ spd_if.ver = DIP_VERSION_SPD;
+ spd_if.len = DIP_LEN_SPD;
+ strcpy((char *)spd_if.body.spd.vn, "Intel");
+ strcpy((char *)spd_if.body.spd.pd, "Integrated gfx");
+ spd_if.body.spd.sdi = DIP_SPD_PC;
+
+ intel_set_infoframe(encoder, &spd_if);
+}
+
+static void g4x_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ u32 reg = VIDEO_DIP_CTL;
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+	/* If the registers were not initialized yet, they might be zeroes,
+	 * which means we're selecting the AVI DIP and setting its
+	 * frequency to "once". This seems to really confuse the HW and
+	 * make things stop working (the register spec says the AVI always
+	 * needs to be sent every VSync). So here we avoid writing to the
+	 * register more than we need to, and also explicitly select the
+	 * AVI DIP and explicitly set its frequency to every VSync.
+	 * Avoiding the extra write seems to be enough to solve the
+	 * problem, but being defensive shouldn't hurt us either. */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case PORT_C:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~VIDEO_DIP_ENABLE_VENDOR;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void ibx_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_dig_port->port) {
+ case PORT_B:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case PORT_C:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ case PORT_D:
+ port = VIDEO_DIP_PORT_D;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void cpt_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ /* Set both together, unset both together: see the spec. */
+ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void vlv_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void hsw_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ I915_WRITE(reg, 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+ /* LINTED */
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 hdmi_val;
+
+ hdmi_val = SDVO_ENCODING_HDMI;
+ if (!HAS_PCH_SPLIT(dev))
+ hdmi_val |= intel_hdmi->color_range;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
+
+ if (intel_crtc->config.pipe_bpp > 24)
+ hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
+ else
+ hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
+
+ /* Required on CPT */
+ if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+ hdmi_val |= HDMI_MODE_SELECT_HDMI;
+
+ if (intel_hdmi->has_audio) {
+ DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ hdmi_val |= SDVO_AUDIO_ENABLE;
+ hdmi_val |= HDMI_MODE_SELECT_HDMI;
+ intel_write_eld(encoder, adjusted_mode);
+ }
+
+ if (HAS_PCH_CPT(dev))
+ hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ else
+ hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+ I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
+}
+
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_hdmi->hdmi_reg);
+
+ if (!(tmp & SDVO_ENABLE))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_hdmi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ u32 tmp, flags = 0;
+
+ tmp = I915_READ(intel_hdmi->hdmi_reg);
+
+ if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ pipe_config->adjusted_mode.flags |= flags;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
+ if (intel_hdmi->has_audio)
+ enable_bits |= SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	/* HW workaround for IBX: we need to move the port to transcoder A
+	 * before disabling it, so restore the transcoder select bit here. */
+ if (HAS_PCH_IBX(dev))
+ enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+	/* HW workaround: the enable bit must be toggled off and on for
+	 * 12bpc, but we do this unconditionally since it proved more
+	 * stable in testing.
+	 */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ }
+
+ temp |= enable_bits;
+
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/* HW workaround: write this twice, as the first write can
+	 * otherwise get masked.
+	 */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ }
+
+ if (IS_VALLEYVIEW(dev)) {
+ struct intel_digital_port *dport =
+ enc_to_dig_port(&encoder->base);
+ int channel = vlv_dport_to_channel(dport);
+
+ vlv_wait_port_ready(dev_priv, channel);
+ }
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	/* HW workaround for IBX: we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
+	/* HW workaround: the enable bit has to be toggled off and on for
+	 * 12bpc, but we do this unconditionally since it proved more stable in testing.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ }
+
+ temp &= ~enable_bits;
+
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/* HW workaround: write this twice, since the first write can
+	 * otherwise be masked.
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ }
+}
+
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
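+	/* Max port clock in kHz: g4x tops out at the 165 MHz single-link
+	 * TMDS limit, Haswell can drive 300 MHz, everything else 225 MHz. */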
+ if (IS_G4X(dev))
+ return 165000;
+ else if (IS_HASWELL(dev))
+ return 300000;
+ else
+ return 225000;
+}
+
+static int intel_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ return MODE_OK;
+}
+
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
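+	/* 12bpc needs 1.5x the 8bpc dotclock on the port (12/8 = 3/2) */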
+ int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+ int desired_bpp;
+
+ if (intel_hdmi->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ if (intel_hdmi->has_hdmi_sink &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
+ else
+ intel_hdmi->color_range = 0;
+ }
+
+ if (intel_hdmi->color_range)
+ pipe_config->limited_color_range = true;
+
+ if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+ pipe_config->has_pch_encoder = true;
+
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
+ * outputs. We also need to check that the higher clock still fits
+ * within limits.
+ */
+ if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
+ && HAS_PCH_SPLIT(dev)) {
+ DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
+ desired_bpp = 12*3;
+
+ /* Need to adjust the port link by 1.5x for 12bpc. */
+ pipe_config->port_clock = clock_12bpc;
+ } else {
+ DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
+ desired_bpp = 8*3;
+ }
+
+ if (!pipe_config->bw_constrained) {
+ DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
+ pipe_config->pipe_bpp = desired_bpp;
+ }
+
+ if (adjusted_mode->clock > portclock_limit) {
+ DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+ return false;
+ }
+
+ return true;
+}
+
+static enum drm_connector_status
+intel_hdmi_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edid *edid;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ intel_hdmi->has_hdmi_sink = false;
+ intel_hdmi->has_audio = false;
+ intel_hdmi->rgb_quant_range_selectable = false;
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
+
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
+ intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ intel_hdmi->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
+ }
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ }
+
+ if (status == connector_status_connected) {
+ if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+ intel_hdmi->has_audio =
+ (intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ }
+
+ return status;
+}
+
+static int intel_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ /* We should parse the EDID data and find out if it's an HDMI sink so
+ * we can send audio to it.
+ */
+
+ return intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
+}
+
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct edid *edid;
+ bool has_audio = false;
+
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ }
+
+ return has_audio;
+}
+
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ enum hdmi_force_audio i = (enum hdmi_force_audio) val;
+ bool has_audio;
+
+ if (i == intel_hdmi->force_audio)
+ return 0;
+
+ intel_hdmi->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_hdmi_detect_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (i == HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink = 0;
+
+ intel_hdmi->has_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_hdmi->color_range_auto;
+ uint32_t old_range = intel_hdmi->color_range;
+
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_hdmi->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_hdmi->color_range_auto = false;
+ intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (old_auto == intel_hdmi->color_range_auto &&
+ old_range == intel_hdmi->color_range)
+ return 0;
+ goto done;
+ }
+
+ return -EINVAL;
+
+done:
+ if (intel_dig_port->base.base.crtc)
+ intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
+
+ return 0;
+}
+
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ /* Enable clock channels for this port */
+ val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
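+	/* Note: the value just read is discarded; the register contents are
+	 * rebuilt from scratch below. */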
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+
+ /* HDMI 1.0V-2dB */
+ vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
+ vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
+ 0x2b245f5f);
+ vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+ 0x5578b83a);
+ vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
+ 0x0c782040);
+ vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
+ 0x2b247878);
+ vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+ 0x00002000);
+ vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+ DPIO_TX_OCALINIT_EN);
+
+ /* Program lane clock */
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+ 0x00760018);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+ 0x00400888);
+}
+
+static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int port = vlv_dport_to_channel(dport);
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ /* Program Tx lane resets to default */
+ vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+ DPIO_PCS_CLK_SOFT_RESET);
+
+ /* Fix up inter-pair skew failure */
+ vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
+ vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+
+ vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+ 0x00002000);
+ vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+ DPIO_TX_OCALINIT_EN);
+}
+
+static void intel_hdmi_post_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ int port = vlv_dport_to_channel(dport);
+
+ /* Reset lanes to avoid HDMI flicker (VLV w/a) */
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void intel_hdmi_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(connector, sizeof(struct intel_connector));
+}
+
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
+ .mode_set = intel_hdmi_mode_set,
+};
+
+static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_hdmi_set_property,
+ .destroy = intel_hdmi_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
+ .get_modes = intel_hdmi_get_modes,
+ .mode_valid = intel_hdmi_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+ intel_hdmi->color_range_auto = true;
+}
+
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+
+ (void) drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ (void) drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
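+	/* Each HDMI port has a fixed DDC (GMBUS) pin pair and hot-plug pin. */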
+ switch (port) {
+ case PORT_B:
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ intel_encoder->hpd_pin = HPD_PORT_B;
+ break;
+ case PORT_C:
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ intel_encoder->hpd_pin = HPD_PORT_C;
+ break;
+ case PORT_D:
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ intel_encoder->hpd_pin = HPD_PORT_D;
+ break;
+ case PORT_A:
+ intel_encoder->hpd_pin = HPD_PORT_A;
+ /* Internal port only for eDP. */
+ default:
+ BUG();
+ }
+
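+	/* Select the platform-specific infoframe write/set hooks. */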
+ if (IS_VALLEYVIEW(dev)) {
+ intel_hdmi->write_infoframe = vlv_write_infoframe;
+ intel_hdmi->set_infoframes = vlv_set_infoframes;
+ } else if (!HAS_PCH_SPLIT(dev)) {
+ intel_hdmi->write_infoframe = g4x_write_infoframe;
+ intel_hdmi->set_infoframes = g4x_set_infoframes;
+ } else if (HAS_DDI(dev)) {
+ intel_hdmi->write_infoframe = hsw_write_infoframe;
+ intel_hdmi->set_infoframes = hsw_set_infoframes;
+ } else if (HAS_PCH_IBX(dev)) {
+ intel_hdmi->write_infoframe = ibx_write_infoframe;
+ intel_hdmi->set_infoframes = ibx_set_infoframes;
+ } else {
+ intel_hdmi->write_infoframe = cpt_write_infoframe;
+ intel_hdmi->set_infoframes = cpt_set_infoframes;
+ }
+
+ if (HAS_DDI(dev))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_hdmi_add_properties(intel_hdmi, connector);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+	/* drm_sysfs_connector_add(connector); */
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
+
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
+{
+ struct intel_digital_port *intel_dig_port;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+
+ intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+ if (!intel_dig_port)
+ return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_dig_port, sizeof(struct intel_digital_port));
+ return;
+ }
+
+ intel_encoder = &intel_dig_port->base;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+ intel_encoder->compute_config = intel_hdmi_compute_config;
+ intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->disable = intel_disable_hdmi;
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+ intel_encoder->get_config = intel_hdmi_get_config;
+ if (IS_VALLEYVIEW(dev)) {
+ intel_encoder->pre_enable = intel_hdmi_pre_enable;
+ intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
+ intel_encoder->post_disable = intel_hdmi_post_disable;
+ }
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->type_size = sizeof(struct intel_digital_port);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->cloneable = false;
+
+ intel_dig_port->port = port;
+ intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
+ intel_dig_port->dp.output_reg = 0;
+
+ intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/usr/src/uts/intel/io/i915/intel_i2c.c b/usr/src/uts/intel/io/i915/intel_i2c.c
new file mode 100644
index 0000000..a3d4ca1
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_i2c.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <sys/types.h>
+#include <sys/ddi.h>
+#include "drmP.h"
+#include "drm.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "drm_sun_i2c.h"
+
+struct gmbus_port {
+ const char *name;
+ int reg;
+};
+
+static const struct gmbus_port gmbus_ports[] = {
+ { "ssc", GPIOB },
+ { "vga", GPIOA },
+ { "panel", GPIOC },
+ { "dpc", GPIOD },
+ { "dpb", GPIOE },
+ { "dpd", GPIOF },
+};
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 10
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+void
+intel_i2c_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 val;
+
+	/* When using bit-banging for I2C, this bit needs to be set to 1 */
+ if (!IS_PINEVIEW(dev_priv->dev))
+ return;
+
+ val = I915_READ(DSPCLK_GATE_D);
+ if (enable)
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
+}
+
+static u32 get_reserved(struct intel_gmbus *bus)
+{
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = 0;
+
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(dev) && !IS_845G(dev))
+ reserved = I915_READ_NOTRACE(bus->gpio_reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
+
+static int vga_get_clock(void *data)
+{
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int vga_get_data(void *data)
+{
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
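+/*
+ * The read helpers below first switch the pin to an input (direction mask
+ * written with the direction value clear) and then sample the level, so the
+ * pull-up or the far end defines the bus state.
+ */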
+static int get_clock(void *data)
+{
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
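+/*
+ * Open-drain emulation for the write helpers: "high" releases the line to
+ * the pull-up by making it an input, "low" actively drives it to zero.
+ */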
+static void set_clock(void *data, int state_high)
+{
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ u32 clock_bits;
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
+ POSTING_READ(bus->gpio_reg);
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ u32 data_bits;
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
+ POSTING_READ(bus->gpio_reg);
+}
+
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ intel_i2c_reset(dev_priv->dev);
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ udelay(I2C_RISEFALL_TIME);
+ return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+}
+
+static void
+intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
+{
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct i2c_adapter *algo;
+
+ algo = &bus->adapter;
+
+ /* -1 to map pin pair to gmbus index */
+ bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
+
+ bus->adapter.algo_data = algo;
+ /* OSOL_i915 Begin */
+ algo->setsda = set_data;
+ algo->setscl = set_clock;
+ if (pin == 2) {
+ algo->getsda = vga_get_data;
+ algo->getscl = vga_get_clock;
+ } else {
+ algo->getsda = get_data;
+ algo->getscl = get_clock;
+ }
+ algo->udelay = I2C_RISEFALL_TIME;
+ algo->timeout = drv_usectohz(2200);
+	/* OSOL_i915 End */
+ algo->data = bus;
+}
+
+/*
+ * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
+ * mode. This results in spurious interrupt warnings if the legacy irq no. is
+ * shared with another device. The kernel then disables that interrupt source
+ * and so prevents the other device from working properly.
+ */
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+static int
+gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
+ u32 gmbus2_status,
+ u32 gmbus4_irq_en)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus2 = 0;
+ int ret;
+ if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ gmbus4_irq_en = 0;
+
+ /* Important: The hw handles only the first bit, so set only one! Since
+ * we also need to check for NAKs besides the hw ready/idle signal, we
+ * need to wake up periodically and check that ourselves. */
+ I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset))
+ & (GMBUS_SATOER | gmbus2_status), 50);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (ret)
+ return -ETIMEDOUT;
+
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ if (gmbus2 & gmbus2_status)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ int reg_offset = dev_priv->gpio_mmio_base;
+
+#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
+
+ if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ return wait_for(C, 10);
+
+ /* Important: The hw handles only the first bit, so set only one! */
+ I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
+
+ ret = wait_for(C, 10);
+
+ I915_WRITE(GMBUS4 + reg_offset, 0);
+
+ if (!ret)
+ return 0;
+ else
+ return -ETIMEDOUT;
+#undef C
+}
+
+static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
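+	/* GMBUS3 presents up to four bytes per HW_RDY cycle; unpack LSB first. */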
+ while (len) {
+ int ret;
+ u32 val, loop = 0;
+
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
+ if (ret)
+ return ret;
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ }
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+ u32 val, loop;
+
+ val = loop = 0;
+ while (len && loop < 4) {
+ val |= *buf++ << (8 * loop++);
+ len -= 1;
+ }
+
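+	/* Prime GMBUS3 with the first dword of data before starting the cycle. */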
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+ (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+ GMBUS_HW_RDY_EN);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
+ */
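+/*
+ * A typical beneficiary is an EDID fetch: for example (hypothetical caller),
+ * a one-byte offset write to the EDID address followed by a block read,
+ *   msgs[0] = { .addr = 0x50, .len = 1, .buf = &offset };
+ *   msgs[1] = { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf };
+ * collapses into a single INDEX read cycle.
+ */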
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+{
+ return (i + 1 < num &&
+ !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD));
+}
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus1_index = 0;
+ u32 gmbus5 = 0;
+ int ret;
+
+ if (msgs[0].len == 2)
+ gmbus5 = GMBUS_2BYTE_INDEX_EN |
+ msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+ if (msgs[0].len == 1)
+ gmbus1_index = GMBUS_CYCLE_INDEX |
+ (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
+
+ /* GMBUS5 holds 16-bit index */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, gmbus5);
+
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+
+ /* Clear GMBUS5 after each index transfer */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, 0);
+
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ int i, reg_offset;
+ int ret = 0;
+
+ mutex_lock(&dev_priv->gmbus_mutex);
+
+ if (bus->force_bit) {
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+ goto out;
+ }
+
+ reg_offset = dev_priv->gpio_mmio_base;
+
+ I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+ for (i = 0; i < num; i++) {
+ if (gmbus_is_index_read(msgs, i, num)) {
+ ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+ i += 1; /* set i to the index of the read xfer */
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ } else {
+ ret = gmbus_xfer_write(dev_priv, &msgs[i]);
+ }
+
+ if (ret == -ETIMEDOUT)
+ goto timeout;
+ if (ret == -ENXIO)
+ goto clear_err;
+
+ ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+ GMBUS_HW_WAIT_EN);
+ if (ret == -ENXIO)
+ goto clear_err;
+ if (ret)
+ goto timeout;
+ }
+
+	/* Generate a STOP condition on the bus. Note that gmbus can't generate
+ * a STOP on the very first cycle. To simplify the code we
+ * unconditionally generate the STOP condition with an additional gmbus
+ * cycle. */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+ * till then let it sleep.
+ */
+ if (gmbus_wait_idle(dev_priv)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+ ret = ret ?: i;
+ goto out;
+
+clear_err:
+ /*
+ * Wait for bus to IDLE before clearing NAK.
+ * If we clear the NAK while bus is still active, then it will stay
+ * active and the next transaction may fail.
+ *
+ * If no ACK is received during the address phase of a transaction, the
+ * adapter must report -ENXIO. It is not clear what to return if no ACK
+ * is received at other times. But we have to be careful to not return
+ * spurious -ENXIO because that will prevent i2c and drm edid functions
+	 * from retrying. So return -ENXIO only when gmbus properly quiesces -
+ * timing out seems to happen when there _is_ a ddc chip present, but
+ * it's slow responding and only answers on the 2nd retry.
+ */
+ ret = -ENXIO;
+ if (gmbus_wait_idle(dev_priv)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ I915_WRITE(GMBUS1 + reg_offset, 0);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+
+ DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
+ goto out;
+
+timeout:
+ DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+
+	/* Hardware may not support GMBUS over these pins; try GPIO bit-banging instead. */
+ bus->force_bit = 1;
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+
+out:
+ mutex_unlock(&dev_priv->gmbus_mutex);
+ return ret;
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ return adapter->algo->functionality(adapter);
+}
+
+static struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
+/**
+ * intel_setup_gmbus - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int intel_setup_gmbus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
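+	/* gpio_mmio_base relocates the gen2-4 GPIO/GMBUS register block to its
+	 * PCH or VLV location; zero keeps the legacy offsets. */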
+ if (HAS_PCH_NOP(dev))
+ return 0;
+ else if (HAS_PCH_SPLIT(dev))
+ dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else if (IS_VALLEYVIEW(dev))
+ dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
+ else
+ dev_priv->gpio_mmio_base = 0;
+
+ mutex_init(&dev_priv->gmbus_mutex, NULL, MUTEX_DRIVER, NULL);
+ DRM_INIT_WAITQUEUE(&dev_priv->gmbus_wait_queue, DRM_INTR_PRI(dev));
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ u32 port = i + 1; /* +1 to map gmbus index to pin pair */
+
+ snprintf(bus->adapter.name,
+ sizeof(bus->adapter.name),
+ "i915 gmbus %s",
+ gmbus_ports[i].name);
+
+		/* bus->adapter.dev.parent = &dev->pdev->dev; */
+ bus->dev_priv = dev_priv;
+
+ bus->adapter.algo = &gmbus_algorithm;
+
+ /* By default use a conservative clock rate */
+ bus->reg0 = port | GMBUS_RATE_100KHZ;
+
+ /* gmbus seems to be broken on i830 */
+ if (IS_I830(dev))
+ bus->force_bit = 1;
+
+ intel_gpio_setup(bus, port);
+ }
+
+ intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+}
+
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+ unsigned port)
+{
+ WARN_ON(!intel_gmbus_is_port_valid(port));
+ /* -1 to map pin pair to gmbus index */
+ return (intel_gmbus_is_port_valid(port)) ?
+ &dev_priv->gmbus[port - 1].adapter : NULL;
+}
+
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
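+	/* Bits 9:8 of GMBUS0 select the bus rate; speed is a GMBUS_RATE_* value. */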
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
+}
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
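+	/* force_bit acts as a counter: bit-banging stays selected for as long
+	 * as any user still holds a reference. */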
+ bus->force_bit += force_bit ? 1 : -1;
+ DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
+}
+
+void intel_teardown_gmbus(struct drm_device *dev)
+{
+/*
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+// i2c_del_adapter(&bus->adapter);
+ }
+*/
+}
+
+/* workaround for fixing hdmi issue */
+void intel_gmbus_hdmi_set_adapter(struct i2c_adapter *adapter)
+{
+ adapter->getsda = vga_get_data;
+ adapter->getscl = vga_get_clock;
+}
diff --git a/usr/src/uts/intel/io/i915/intel_lvds.c b/usr/src/uts/intel/io/i915/intel_lvds.c
new file mode 100644
index 0000000..abecba7
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_lvds.c
@@ -0,0 +1,1114 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "drm_sun_i2c.h" /* OSOL_i915 */
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_connector {
+ struct intel_connector base;
+ int fitting_mode;
+};
+
+struct intel_lvds_encoder {
+ struct intel_encoder base;
+
+ bool is_dual_link;
+ u32 reg;
+
+ struct intel_lvds_connector *attached_connector;
+};
+
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_lvds_encoder, base.base);
+}
+
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct intel_lvds_connector, base.base);
+}
+
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ u32 tmp;
+
+ tmp = I915_READ(lvds_encoder->reg);
+
+ if (!(tmp & LVDS_PORT_EN))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_lvds_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lvds_reg, tmp, flags = 0;
+
+ if (HAS_PCH_SPLIT(dev))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
+
+ tmp = I915_READ(lvds_reg);
+ if (tmp & LVDS_HSYNC_POLARITY)
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ if (tmp & LVDS_VSYNC_POLARITY)
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_PVSYNC;
+
+ pipe_config->adjusted_mode.flags |= flags;
+
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (INTEL_INFO(dev)->gen < 4) {
+ tmp = I915_READ(PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
+}
+
+/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *fixed_mode =
+ lvds_encoder->attached_connector->base.panel.fixed_mode;
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(lvds_encoder->reg);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+ } else {
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ }
+
+	/* set the corresponding LVDS_BORDER bit */
+ temp &= ~LVDS_BORDER_ENABLE;
+ temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (lvds_encoder->is_dual_link)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+
+	/* Set the dithering flag on LVDS as needed. Note that there is no
+	 * special lvds dither control bit on pch-split platforms; dithering is
+	 * only controlled through the PIPECONF reg. */
+ if (INTEL_INFO(dev)->gen == 4) {
+ /* Bspec wording suggests that LVDS port dithering only exists
+ * for 18bpp panels. */
+ if (intel_crtc->config.dither &&
+ intel_crtc->config.pipe_bpp == 18)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+
+ I915_WRITE(lvds_encoder->reg, temp);
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void intel_enable_lvds(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ctl_reg, stat_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ stat_reg = PCH_PP_STATUS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ stat_reg = PP_STATUS;
+ }
+
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_encoder->reg);
+ if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power on\n");
+
+ intel_panel_enable_backlight(dev, intel_crtc->pipe);
+}
+
+static void intel_disable_lvds(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ctl_reg, stat_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ stat_reg = PCH_PP_STATUS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ stat_reg = PP_STATUS;
+ }
+
+ intel_panel_disable_backlight(dev);
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
+ I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_encoder->reg);
+}
+
+static int intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+
+ return MODE_OK;
+}
+
+static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds_encoder *lvds_encoder =
+ to_lvds_encoder(&intel_encoder->base);
+ struct intel_connector *intel_connector =
+ &lvds_encoder->attached_connector->base;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+ unsigned int lvds_bpp;
+
+ /* Should never happen!! */
+ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+ DRM_ERROR("Can't support LVDS on pipe A\n");
+ return false;
+ }
+
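+	/* LVDS_A3_POWER_UP on the third data pair indicates a 24-bit panel,
+	 * otherwise assume an 18-bit one; map that to 8 or 6 bits per channel. */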
+ if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
+ LVDS_A3_POWER_UP)
+ lvds_bpp = 8*3;
+ else
+ lvds_bpp = 6*3;
+
+ if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
+ DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
+ pipe_config->pipe_bpp = lvds_bpp;
+ }
+
+ /*
+	 * We have timings from the BIOS for the panel; put them into
+	 * the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+ adjusted_mode);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ pipe_config->has_pch_encoder = true;
+
+ intel_pch_panel_fitting(intel_crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ } else {
+ intel_gmch_panel_fitting(intel_crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+	}
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ /* LINTED */
+ struct drm_display_mode *mode,
+ /* LINTED */
+ struct drm_display_mode *adjusted_mode)
+{
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
+ * connected and closed means disconnected. We also send hotplug events as
+ * needed, using lid status notification from the input layer.
+ */
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status status;
+
+ status = intel_panel_detect(dev);
+ if (status != connector_status_unknown)
+ return status;
+
+ return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+
+ /* use cached edid if we have one */
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ return drm_add_edid_modes(connector, lvds_connector->base.edid);
+
+ mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+ if (mode == NULL)
+ return 0;
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+/* OSOL_I915 */
+#if 0
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+ return 1;
+}
+
+/* The GPU hangs up on these systems if modeset is performed on LID open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+ {
+ .callback = intel_no_modeset_on_lid_dmi_callback,
+ .ident = "Toshiba Tecra A11",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
+
+/*
+ * Lid events. Note the use of 'modeset_on_lid':
+ * - we set it on lid close, and reset it on open
+ * - we use it as a "only once" bit (ie we ignore
+ * duplicate events where it was already properly
+ * set/reset)
+ * - the suspend/resume paths will also set it to
+ * zero, since they restore the mode ("lid open").
+ */
+static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+ void *unused)
+{
+ struct intel_lvds_connector *lvds_connector =
+ container_of(nb, struct intel_lvds_connector, lid_notifier);
+ struct drm_connector *connector = &lvds_connector->base.base;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return NOTIFY_OK;
+
+ mutex_lock(&dev_priv->modeset_restore_lock);
+ if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+ goto exit;
+ /*
+	 * check and update the status of the LVDS connector after receiving
+	 * the LID notification event.
+ */
+ connector->status = connector->funcs->detect(connector, false);
+
+ /* Don't force modeset on machines where it causes a GPU lockup */
+ if (dmi_check_system(intel_no_modeset_on_lid))
+ goto exit;
+ if (!acpi_lid_open()) {
+ /* do modeset on next lid open event */
+ dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+ goto exit;
+ }
+
+ if (dev_priv->modeset_restore == MODESET_DONE)
+ goto exit;
+
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(dev, true);
+ drm_modeset_unlock_all(dev);
+
+ dev_priv->modeset_restore = MODESET_DONE;
+
+exit:
+ mutex_unlock(&dev_priv->modeset_restore_lock);
+ return NOTIFY_OK;
+}
+#endif
+
+/**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+static void intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct intel_lvds_connector *lvds_connector =
+ to_lvds_connector(connector);
+
+ if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ kfree(lvds_connector->base.edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+
+ intel_panel_fini(&lvds_connector->base.panel);
+ /* OSOL_i915: drm_sysfs_connector_remove(connector); */
+ drm_connector_cleanup(connector);
+ kfree(lvds_connector, sizeof (struct intel_lvds_connector));
+}
+
+static int intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct drm_crtc *crtc;
+
+ if (value == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == value) {
+ /* the LVDS scaling property is not changed */
+ return 0;
+ }
+ intel_connector->panel.fitting_mode = (int) value;
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
+ if (crtc && crtc->enabled) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ intel_crtc_restore_mode(crtc);
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
+ .mode_set = intel_lvds_mode_set,
+};
+
+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+ .get_modes = intel_lvds_get_modes,
+ .mode_valid = intel_lvds_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_lvds_set_property,
+ .destroy = intel_lvds_destroy,
+};
+
+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+/* OSOL_I915 */
+#if 0
+static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
+ return 1;
+}
+
+/* These systems claim to have LVDS, but really don't */
+static const struct dmi_system_id intel_no_lvds[] = {
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Apple Mac Mini (Core series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Apple Mac Mini (Core 2 series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI IM-945GSE-A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell Studio Hybrid",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell OptiPlex FX170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC MP915",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Aopen i945GTt-VFA",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus EeeBox PC EB1007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI Wind Box DC500",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Supermicro X7SPA-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D510MO",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D525MW",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+#endif
+
+/**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+static void intel_find_lvds_downclock(struct drm_device *dev,
+ struct drm_display_mode *fixed_mode,
+ struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *scan;
+ int temp_downclock;
+
+ temp_downclock = fixed_mode->clock;
+ list_for_each_entry(scan, struct drm_display_mode, &connector->probed_modes, head) {
+ /*
+		 * If a mode has the same timings as the fixed_panel mode but
+		 * a different refresh rate, a reduced downclock has been found
+		 * for the LVDS. In that case we can program different FPx0/1
+		 * dividers to dynamically select between the low and high
+		 * frequency.
+ */
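+		/*
+		 * Example (hypothetical EDID): a panel listing 1920x1080 with
+		 * identical timings at both a full and a reduced clock yields
+		 * the reduced clock as the downclock candidate here.
+		 */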
+ if (scan->hdisplay == fixed_mode->hdisplay &&
+ scan->hsync_start == fixed_mode->hsync_start &&
+ scan->hsync_end == fixed_mode->hsync_end &&
+ scan->htotal == fixed_mode->htotal &&
+ scan->vdisplay == fixed_mode->vdisplay &&
+ scan->vsync_start == fixed_mode->vsync_start &&
+ scan->vsync_end == fixed_mode->vsync_end &&
+ scan->vtotal == fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+				 * A downclock was already found, but we
+				 * keep looking for an even lower one.
+ */
+ temp_downclock = scan->clock;
+ }
+ }
+ }
+ if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+ "Normal clock %dKhz, downclock %dKhz\n",
+ fixed_mode->clock, temp_downclock);
+ }
+}
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return true.
+ * If it is not present, return false.
+ * If no child dev was parsed from the VBT, assume that the LVDS is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ struct child_device_config *child = dev_priv->vbt.child_dev + i;
+ /*
+ * If the device type is not LFP, continue.
+		 * A device type of 0x22 is also regarded as an LFP; this old
+		 * value is kept for compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (intel_gmbus_is_port_valid(child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_lvds_encoder *lvds_encoder;
+
+ list_for_each_entry(encoder, struct intel_encoder,
+ &dev->mode_config.encoder_list, base.head) {
+ if (encoder->type == INTEL_OUTPUT_LVDS) {
+ lvds_encoder = to_lvds_encoder(&encoder->base);
+
+ return lvds_encoder->is_dual_link;
+ }
+ }
+
+ return false;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+ struct drm_device *dev = lvds_encoder->base.base.dev;
+ unsigned int val;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(lvds_encoder->reg);
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+ val = dev_priv->vbt.bios_lvds_val;
+
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
+static bool intel_lvds_supported(struct drm_device *dev)
+{
+ /* With the introduction of the PCH we gained a dedicated
+ * LVDS presence pin, use it. */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ return true;
+
+ /* Otherwise LVDS was only attached to mobile products,
+ * except for the inglorious 830gm */
+ if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+ return true;
+
+ return false;
+}
+
+/**
+ * intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void intel_lvds_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_lvds_encoder *lvds_encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_lvds_connector *lvds_connector;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_display_mode *fixed_mode = NULL;
+ struct edid *edid;
+ struct drm_crtc *crtc;
+ u32 lvds;
+ int pipe;
+ u8 pin;
+
+ if (!intel_lvds_supported(dev))
+ return;
+
+ /* Skip init on machines we know falsely report LVDS */
+ //XXX if (dmi_check_system(intel_no_lvds))
+ // return;
+
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
+ }
+
+ if (HAS_PCH_SPLIT(dev)) {
+ if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+ return;
+ if (dev_priv->vbt.edp_support) {
+ DRM_DEBUG_KMS("disable LVDS for eDP support\n");
+ return;
+ }
+ }
+
+ lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+ if (!lvds_encoder)
+ return;
+
+ lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+ if (!lvds_connector) {
+ kfree(lvds_encoder, sizeof(*lvds_encoder));
+ return;
+ }
+
+ lvds_encoder->attached_connector = lvds_connector;
+
+ intel_encoder = &lvds_encoder->base;
+ encoder = &intel_encoder->base;
+ intel_connector = &lvds_connector->base;
+ connector = &intel_connector->base;
+ (void) drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ (void) drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+ intel_encoder->compute_config = intel_lvds_compute_config;
+ intel_encoder->disable = intel_disable_lvds;
+ intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+ intel_encoder->get_config = intel_lvds_get_config;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->type_size = sizeof(struct intel_lvds_encoder);
+
+ intel_encoder->cloneable = false;
+ if (HAS_PCH_SPLIT(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ else if (IS_GEN4(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ else
+ intel_encoder->crtc_mask = (1 << 1);
+
+ drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ lvds_encoder->reg = PCH_LVDS;
+ } else {
+ lvds_encoder->reg = LVDS;
+ }
+
+ /* create the scaling mode property */
+ (void) drm_mode_create_scaling_mode_property(dev);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ (void) drm_mode_connector_update_edid_property(connector,
+ edid);
+ } else {
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ edid = NULL;
+ }
+ } else {
+ edid = NULL;
+ }
+ lvds_connector->base.edid = edid;
+
+ if (IS_ERR_OR_NULL(edid)) {
+		/*
+		 * Didn't get an EDID, so set wide sync ranges to ensure all
+		 * modes are handed to valid_mode for checking.
+		 */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ }
+
+ list_for_each_entry(scan, struct drm_display_mode, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ DRM_DEBUG_KMS("using preferred mode from EDID: ");
+ drm_mode_debug_printmodeline(scan);
+
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ if (fixed_mode) {
+ intel_find_lvds_downclock(dev, fixed_mode,
+ connector);
+ goto out;
+ }
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? */
+ if (dev_priv->vbt.lfp_lvds_vbt_mode) {
+ DRM_DEBUG_KMS("using mode from VBT: ");
+ drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
+
+ fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (fixed_mode) {
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ goto out;
+ }
+ }
+
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+
+	/* Ironlake: FIXME - if we still have no mode, don't try the pipe mode for now */
+ if (HAS_PCH_SPLIT(dev))
+ goto failed;
+
+ lvds = I915_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = intel_get_crtc_for_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ goto out;
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!fixed_mode)
+ goto failed;
+
+out:
+ lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+ DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
+
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else {
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ }
+ // XXX dev_priv->lid_notifier.notifier_call = intel_lid_notify;
+ /* XXX lack of acpi support
+ if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+ DRM_DEBUG("lid notifier registration failed\n");
+ dev_priv->lid_notifier.notifier_call = NULL;
+ } */
+ /* OSOL_i915: drm_sysfs_connector_add(connector); */
+
+ intel_panel_init(&intel_connector->panel, fixed_mode);
+ if (intel_panel_setup_backlight(connector) == -ENODEV)
+ DRM_DEBUG_KMS("Could not find backlight for dev %p\n", dev);
+
+ return;
+
+failed:
+ DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+ drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
+ if (fixed_mode)
+ drm_mode_destroy(dev, fixed_mode);
+ kfree(lvds_encoder, sizeof(*lvds_encoder));
+ kfree(lvds_connector, sizeof(*lvds_connector));
+ return;
+}
diff --git a/usr/src/uts/intel/io/i915/intel_modes.c b/usr/src/uts/intel/io/i915/intel_modes.c
new file mode 100644
index 0000000..ada38bc
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_modes.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "intel_drv.h"
+#include "i915_drv.h"
+/* OSOL_i915 Begin */
+#include "drm_sun_i2c.h"
+#include "drm_edid.h"
+/* OSOL_i915 End */
+
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int ret;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ return ret;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: i2c adapter
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid, (EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1)));
+ return ret;
+}
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+ { HDMI_AUDIO_OFF_DVI, "force-dvi" },
+ { HDMI_AUDIO_OFF, "off" },
+ { HDMI_AUDIO_AUTO, "auto" },
+ { HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->force_audio_property;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->force_audio_property = prop;
+ }
+ drm_object_attach_property(&connector->base, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+ { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+ { INTEL_BROADCAST_RGB_FULL, "Full" },
+ { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop, 0);
+}
diff --git a/usr/src/uts/intel/io/i915/intel_overlay.c b/usr/src/uts/intel/io/i915/intel_overlay.c
new file mode 100644
index 0000000..3441d09
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_overlay.c
@@ -0,0 +1,1533 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+/*
+ * Copyright (c) 2012, 2013, Intel Corporation. All rights reserved.
+ */
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/* Limits for overlay size. According to intel doc, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But the xorg driver assumes 2048 for both
+ * height and width. Use the minimum of both. */
+#define IMAGE_MAX_WIDTH 2048
+#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY 1024
+#define IMAGE_MAX_HEIGHT_LEGACY 1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE (0x1<<19)
+#define OCMD_MIRROR_MASK (0x3<<17)
+#define OCMD_MIRROR_MODE (0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
+#define OCMD_MIRROR_VERTICAL (0x2<<17)
+#define OCMD_MIRROR_BOTH (0x3<<17)
+#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED (0x8<<10)
+#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR (0xc<<10)
+#define OCMD_YUV_422_PLANAR (0xd<<10)
+#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
+#define OCMD_BUF_TYPE_FRAME (0x0<<5)
+#define OCMD_BUF_TYPE_FIELD (0x1<<5)
+#define OCMD_TEST_MODE (0x1<<4)
+#define OCMD_BUFFER_SELECT (0x3<<2)
+#define OCMD_BUFFER0 (0x0<<2)
+#define OCMD_BUFFER1 (0x1<<2)
+#define OCMD_FIELD_SELECT (0x1<<2)
+#define OCMD_FIELD0 (0x0<<1)
+#define OCMD_FIELD1 (0x1<<1)
+#define OCMD_ENABLE (0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK (0x1<<18)
+#define OCONF_PIPE_A (0x0<<18)
+#define OCONF_PIPE_B (0x1<<18)
+#define OCONF_GAMMA2_ENABLE (0x1<<16)
+#define OCONF_CSC_MODE_BT601 (0x0<<5)
+#define OCONF_CSC_MODE_BT709 (0x1<<5)
+#define OCONF_CSC_BYPASS (0x1<<4)
+#define OCONF_CC_OUT_8BIT (0x1<<3)
+#define OCONF_TEST_MODE (0x1<<2)
+#define OCONF_THREE_LINE_BUFFER (0x1<<0)
+#define OCONF_TWO_LINE_BUFFER (0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE (0x1UL<<31)
+#define CLK_RGB24_MASK 0x0
+#define CLK_RGB16_MASK 0x070307
+#define CLK_RGB15_MASK 0x070707
+#define CLK_RGB8I_MASK 0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+ (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+ (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+
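+/*
+ * Worked example, traced from the macros above: RGB565 pure red
+ * 0xF800 -> 0x00F80000, i.e. each 5/6-bit component is shifted up to
+ * the top of its 8-bit lane in the XRGB8888 color key.
+ */
+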
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS 5
+#define N_VERT_Y_TAPS 3
+#define N_HORIZ_UV_TAPS 3
+#define N_VERT_UV_TAPS 3
+#define N_PHASES 17
+#define MAX_TAPS 5
+
+/* memory buffered overlay registers */
+struct overlay_registers {
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ /* flip handling */
+ uint32_t last_flip_req;
+ void (*flip_tail)(struct intel_overlay *);
+};
+
+static struct overlay_registers *
+intel_overlay_map_regs(struct intel_overlay *overlay)
+{
+ struct overlay_registers *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = (struct overlay_registers *)overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = (struct overlay_registers *)(uintptr_t)overlay->reg_bo->page_list[0];
+
+ return regs;
+}
+
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+	/* nothing to unmap; map_regs just returns an existing address */
+}
+
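+/*
+ * Emit a request on the render ring and block until the GPU retires
+ * it.  The 'tail' callback is recorded first so that, if the wait is
+ * interrupted, intel_overlay_recover_from_interrupt() can finish the
+ * work later; last_flip_req doubles as the flip-in-flight flag.
+ */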
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ void (*tail)(struct intel_overlay *))
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ BUG_ON(overlay->last_flip_req);
+ ret = i915_add_request(ring, &overlay->last_flip_req);
+ if (ret)
+ return ret;
+
+ overlay->flip_tail = tail;
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ if (ret)
+ return ret;
+ i915_gem_retire_requests(dev);
+
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ BUG_ON(overlay->active);
+ overlay->active = 1;
+
+ WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return intel_overlay_do_wait_request(overlay, NULL);
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static int intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ u32 flip_addr = overlay->flip_addr;
+ u32 tmp;
+ int ret;
+
+ BUG_ON(!overlay->active);
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ /* check for underruns */
+ tmp = I915_READ(DOVSTA);
+ if (tmp & (1 << 17))
+ DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_advance(ring);
+
+ return i915_add_request(ring, &overlay->last_flip_req);
+}
+
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+{
+ struct drm_i915_gem_object *obj = overlay->old_vid_bo;
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ overlay->old_vid_bo = NULL;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_i915_gem_object *obj = overlay->vid_bo;
+
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ overlay->vid_bo = NULL;
+
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ u32 flip_addr = overlay->flip_addr;
+ int ret;
+
+ BUG_ON(!overlay->active);
+
+ /* According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases */
+ flip_addr |= OFC_UPDATE;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ /* wait for overlay to go idle */
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ /* turn overlay off */
+ if (IS_I830(dev)) {
+ /* Workaround: Don't disable the overlay fully, since otherwise
+ * it dies on the next OVERLAY_ON cmd. */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ } else {
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ }
+ intel_ring_advance(ring);
+
+ return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+}
+
+/* recover from an interruption due to a signal
+ * We have to be careful not to repeat work forever and to make forward progress. */
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ if (overlay->last_flip_req == 0)
+ return 0;
+
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
+ if (ret)
+ return ret;
+ i915_gem_retire_requests(dev);
+
+ if (overlay->flip_tail)
+ overlay->flip_tail(overlay);
+
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* Wait for a pending overlay flip and release the old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs
+ */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ /* Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
+ if (!overlay->old_vid_bo)
+ return 0;
+
+ if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ /* synchronous slowpath */
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_overlay_do_wait_request(overlay,
+ intel_overlay_release_old_vid_tail);
+ if (ret)
+ return ret;
+ }
+
+ intel_overlay_release_old_vid_tail(overlay);
+ return 0;
+}
+
+struct put_image_params {
+ int format;
+ short dst_x;
+ short dst_y;
+ short dst_w;
+ short dst_h;
+ short src_w;
+ short src_scan_h;
+ short src_scan_w;
+ short src_h;
+ short stride_Y;
+ short stride_UV;
+ int offset_Y;
+ int offset_U;
+ int offset_V;
+};
+
+static int packed_depth_bytes(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
+ }
+}
+
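+/*
+ * Chroma subsampling factors for the supported YUV layouts: e.g.
+ * YUV420 carries one U/V sample per 2x2 block of Y samples, so both
+ * uv_hsubsampling() and uv_vsubsampling() return 2 for it, while
+ * packed YUV422 is subsampled horizontally only (2 and 1).
+ */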
+static int uv_hsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_vsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
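+/*
+ * calc_swidthsw() appears to compute the SWIDTHSW field: the span of a
+ * source line in 32-byte units on gen2 (mask 0x1f, shift 5) or 64-byte
+ * units otherwise, massaged into the register encoding.  Worked example
+ * on gen3+: offset 0, width 1024 -> ((0 + 1024 + 63) >> 6) - 0 = 16,
+ * then <<= 1 gives 32, -= 1 gives 31, and << 2 returns 124.
+ */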
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+ u32 mask, shift, ret;
+ if (IS_GEN2(dev)) {
+ mask = 0x1f;
+ shift = 5;
+ } else {
+ mask = 0x3f;
+ shift = 6;
+ }
+ ret = ((offset + width + mask) >> shift) - (offset >> shift);
+ if (!IS_GEN2(dev))
+ ret <<= 1;
+	ret -= 1;
+ return ret << 2;
+}
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+ 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+ 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+ 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+ 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+ 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+ 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+ 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+ 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+ 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+ 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+ 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+ 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+ 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+ 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+ 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+ 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+ 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+ 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+ 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+ 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+ 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+ 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+ 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+ 0x3000, 0x0800, 0x3000
+};
+
+static void update_polyphase_filter(struct overlay_registers *regs)
+{
+ memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+}
+
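+/*
+ * Scaling factors are 12.12 fixed point, (1 << 12) == 1.0.  Worked
+ * example: src_scan_w = 1280 into dst_w = 640 gives
+ * xscale = (1279 << 12) / 640 = 8185, roughly 2.0, i.e. a 2x
+ * horizontal downscale.
+ */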
+static bool update_scaling_factors(struct intel_overlay *overlay,
+ struct overlay_registers *regs,
+ struct put_image_params *params)
+{
+ /* fixed point with a 12 bit shift */
+ u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+ bool scale_changed = false;
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+
+ if (params->dst_w > 1)
+ xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+			/ (params->dst_w);
+ else
+ xscale = 1 << FP_SHIFT;
+
+ if (params->dst_h > 1)
+ yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+			/ (params->dst_h);
+ else
+ yscale = 1 << FP_SHIFT;
+
+ /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+	/* make the Y scale to UV scale ratio an exact multiple */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
+ /*} else {
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
+
+ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ scale_changed = true;
+ overlay->old_xscale = xscale;
+ overlay->old_yscale = yscale;
+
+ regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3));
+
+ regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3));
+
+ regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)));
+
+ if (scale_changed)
+ update_polyphase_filter(regs);
+
+ return scale_changed;
+}
+
+static void update_colorkey(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ u32 key = overlay->color_key;
+
+ switch (overlay->crtc->base.fb->bits_per_pixel) {
+ case 8:
+ regs->DCLRKV = 0;
+ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ break;
+
+ case 16:
+ if (overlay->crtc->base.fb->depth == 15) {
+ regs->DCLRKV = RGB15_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ } else {
+ regs->DCLRKV = RGB16_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ }
+ break;
+
+ case 24:
+ case 32:
+ regs->DCLRKV = key;
+ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ break;
+ }
+}
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+ u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
+ }
+ } else { /* YUV packed */
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
+ }
+
+ switch (params->format & I915_OVERLAY_SWAP_MASK) {
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
+ }
+ }
+
+ return cmd;
+}
+
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_i915_gem_object *new_bo,
+ struct put_image_params *params)
+{
+ int ret, tmp_width;
+ struct overlay_registers *regs;
+ bool scale_changed = false;
+ /* LINTED */
+ struct drm_device *dev = overlay->dev;
+ u32 swidth, swidthsw, sheight, ostride;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!overlay);
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
+ goto out_unpin;
+
+ if (!overlay->active) {
+ u32 oconfig;
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ oconfig = OCONF_CC_OUT_8BIT;
+ if (IS_GEN4(overlay->dev))
+ oconfig |= OCONF_CSC_MODE_BT709;
+ oconfig |= overlay->crtc->pipe == 0 ?
+ OCONF_PIPE_A : OCONF_PIPE_B;
+ regs->OCONFIG = oconfig;
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_on(overlay);
+ if (ret != 0)
+ goto out_unpin;
+ }
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
+ regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+
+ if (params->format & I915_OVERLAY_YUV_PACKED)
+ tmp_width = packed_width_bytes(params->format, params->src_w);
+ else
+ tmp_width = params->src_w;
+
+ swidth = params->src_w;
+ swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ sheight = params->src_h;
+	regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
+ ostride = params->stride_Y;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+ u32 tmp_U, tmp_V;
+ swidth |= (params->src_w/uv_hscale) << 16;
+ tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ params->src_w/uv_hscale);
+ tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ params->src_w/uv_hscale);
+ swidthsw |= max(tmp_U, tmp_V) << 16;
+ sheight |= (params->src_h/uv_vscale) << 16;
+ regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+ regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
+ ostride |= params->stride_UV << 16;
+ }
+
+ regs->SWIDTH = swidth;
+ regs->SWIDTHSW = swidthsw;
+ regs->SHEIGHT = sheight;
+ regs->OSTRIDE = ostride;
+ scale_changed = update_scaling_factors(overlay, regs, params);
+
+ update_colorkey(overlay, regs);
+
+ regs->OCMD = overlay_cmd_reg(params);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_continue(overlay, scale_changed);
+ if (ret)
+ goto out_unpin;
+
+ overlay->old_vid_bo = overlay->vid_bo;
+ overlay->vid_bo = new_bo;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(new_bo);
+ return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+ /* LINTED */
+ struct drm_device *dev = overlay->dev;
+ struct overlay_registers *regs;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ return ret;
+
+ if (!overlay->active)
+ return 0;
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ regs = intel_overlay_map_regs(overlay);
+ regs->OCMD = 0;
+ intel_overlay_unmap_regs(overlay, regs);
+
+ ret = intel_overlay_off(overlay);
+ if (ret != 0)
+ return ret;
+
+ intel_overlay_off_tail(overlay);
+ return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ struct intel_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+
+ if (!crtc->active)
+ return -EINVAL;
+
+ /* can't use the overlay with double wide pipe */
+ if (INTEL_INFO(overlay->dev)->gen < 4 &&
+ (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 ratio;
+
+ /* XXX: This is not the same logic as in the xorg driver, but more in
+ * line with the intel documentation for the i965
+ */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ } else {
+ if (pfit_control & VERT_AUTO_SCALE)
+ ratio = I915_READ(PFIT_AUTO_RATIOS);
+ else
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
+ }
+
+ overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+ struct drm_intel_overlay_put_image *rec)
+{
+ struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+ if (rec->dst_x < mode->hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->hdisplay &&
+ rec->dst_y < mode->vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->vdisplay)
+ return 0;
+ else
+ return -EINVAL;
+}
+
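+/*
+ * tmp below is the integer part of the source/destination ratio, so
+ * rejecting tmp > 7 enforces the documented 8.0 downscaling limit:
+ * e.g. src_scan_h = 1600 into dst_h = 200 yields tmp = 8 -> -EINVAL.
+ */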
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+ u32 tmp;
+
+ /* downscaling limit is 8.0 */
+ tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+ tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_overlay_src(struct drm_device *dev,
+ struct drm_intel_overlay_put_image *rec,
+ struct drm_i915_gem_object *new_bo)
+{
+ int uv_hscale = uv_hsubsampling(rec->flags);
+ int uv_vscale = uv_vsubsampling(rec->flags);
+ u32 stride_mask;
+ int depth;
+ u32 tmp;
+
+ /* check src dimensions */
+ if (IS_845G(dev) || IS_I830(dev)) {
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ return -EINVAL;
+ } else {
+ if (rec->src_height > IMAGE_MAX_HEIGHT ||
+ rec->src_width > IMAGE_MAX_WIDTH)
+ return -EINVAL;
+ }
+
+ /* better safe than sorry, use 4 as the maximal subsampling ratio */
+ if (rec->src_height < N_VERT_Y_TAPS*4 ||
+ rec->src_width < N_HORIZ_Y_TAPS*4)
+ return -EINVAL;
+
+ /* check alignment constraints */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+
+ case I915_OVERLAY_YUV_PACKED:
+ if (uv_vscale != 1)
+ return -EINVAL;
+
+ depth = packed_depth_bytes(rec->flags);
+ if (depth < 0)
+ return depth;
+
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rec->src_width % uv_hscale)
+ return -EINVAL;
+
+ /* stride checking */
+ if (IS_I830(dev) || IS_845G(dev))
+ stride_mask = 255;
+ else
+ stride_mask = 63;
+
+ if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ return -EINVAL;
+ if (IS_GEN4(dev) && rec->stride_Y < 512)
+ return -EINVAL;
+
+ tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+ 4096 : 8192;
+ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+ return -EINVAL;
+
+ /* check buffer dimensions */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* always 4 Y values per depth pixels */
+ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y * rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+
+ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /* i830 doesn't have a panel fitter */
+ if (IS_I830(dev))
+ return -1;
+
+ pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ if (IS_GEN4(dev))
+ return (pfit_control >> 29) & 0x3;
+
+ /* older chips can only use pipe 1 */
+ return 1;
+}
+
+int intel_overlay_put_image(DRM_IOCTL_ARGS)
+{
+ struct drm_intel_overlay_put_image *put_image_rec = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *new_bo;
+ struct put_image_params *params;
+ int ret;
+
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+ drm_modeset_lock_all(dev);
+ mutex_lock(&dev->struct_mutex);
+
+ ret = intel_overlay_switch_off(overlay);
+
+ mutex_unlock(&dev->struct_mutex);
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+ }
+
+ params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!drmmode_obj) {
+ ret = -ENOENT;
+ goto out_free;
+ }
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+ new_bo = to_intel_bo(drm_gem_object_lookup(dev, file,
+ put_image_rec->bo_handle));
+ if (&new_bo->base == NULL) {
+ ret = -ENOENT;
+ goto out_free;
+ }
+
+ drm_modeset_lock_all(dev);
+ mutex_lock(&dev->struct_mutex);
+
+ if (new_bo->tiling_mode) {
+ DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->crtc != crtc) {
+ struct drm_display_mode *mode = &crtc->base.mode;
+ ret = intel_overlay_switch_off(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = check_overlay_possible_on_crtc(overlay, crtc);
+ if (ret != 0)
+ goto out_unlock;
+
+ overlay->crtc = crtc;
+ crtc->overlay = overlay;
+
+ /* line too wide, i.e. one-line-mode */
+ if (mode->hdisplay > 1024 &&
+ intel_panel_fitter_pipe(dev) == crtc->pipe) {
+ overlay->pfit_active = 1;
+ update_pfit_vscale_ratio(overlay);
+ } else
+ overlay->pfit_active = 0;
+ }
+
+ ret = check_overlay_dst(overlay, put_image_rec);
+ if (ret != 0)
+ goto out_unlock;
+
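+	/*
+	 * The destination rectangle appears to be given in panel-fitted
+	 * coordinates; when the panel fitter is vertically scaling this
+	 * pipe, map it back through the 12-bit fixed-point ratio
+	 * (ratio == 1 << 12 means 1.0 and leaves dst_y unchanged).
+	 */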
+ if (overlay->pfit_active) {
+ params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+ overlay->pfit_vscale_ratio);
+ /* shifting right rounds downwards, so add 1 */
+ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+ overlay->pfit_vscale_ratio) + 1;
+ } else {
+ params->dst_y = put_image_rec->dst_y;
+ params->dst_h = put_image_rec->dst_height;
+ }
+ params->dst_x = put_image_rec->dst_x;
+ params->dst_w = put_image_rec->dst_width;
+
+ params->src_w = put_image_rec->src_width;
+ params->src_h = put_image_rec->src_height;
+ params->src_scan_w = put_image_rec->src_scan_width;
+ params->src_scan_h = put_image_rec->src_scan_height;
+ if (params->src_scan_h > params->src_h ||
+ params->src_scan_w > params->src_w) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = check_overlay_src(dev, put_image_rec, new_bo);
+ if (ret != 0)
+ goto out_unlock;
+ params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+ params->stride_Y = put_image_rec->stride_Y;
+ params->stride_UV = put_image_rec->stride_UV;
+ params->offset_Y = put_image_rec->offset_Y;
+ params->offset_U = put_image_rec->offset_U;
+ params->offset_V = put_image_rec->offset_V;
+
+ /* Check scaling after src size to prevent a divide-by-zero. */
+ ret = check_overlay_scaling(params);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ if (ret != 0)
+ goto out_unlock;
+
+ mutex_unlock(&dev->struct_mutex);
+ drm_modeset_unlock_all(dev);
+
+ kfree(params, sizeof(*params));
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ drm_modeset_unlock_all(dev);
+ drm_gem_object_unreference_unlocked(&new_bo->base);
+out_free:
+ kfree(params, sizeof(*params));
+
+ return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
+ regs->OCLRC1 = overlay->saturation;
+}
+
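+/*
+ * Each of the six gamma points packs three 8-bit channel values, one
+ * byte per color channel with the top byte unused.  The checks below
+ * require every channel to increase strictly from one point to the
+ * next, and an apparent hardware erratum forbids the value 0x80 in
+ * any channel of gamma5.
+ */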
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+ int i;
+
+ if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+ return false;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma5 >> i*8) & 0xff) == 0x80)
+ return false;
+ }
+
+ return true;
+}
+
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+ if (!check_gamma_bounds(0, attrs->gamma0) ||
+ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ return -EINVAL;
+
+ if (!check_gamma5_errata(attrs->gamma5))
+ return -EINVAL;
+
+ return 0;
+}
+
+int intel_overlay_attrs(DRM_IOCTL_ARGS)
+{
+ struct drm_intel_overlay_attrs *attrs = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct overlay_registers *regs;
+ int ret;
+
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ drm_modeset_lock_all(dev);
+ mutex_lock(&dev->struct_mutex);
+
+ ret = -EINVAL;
+ if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+ attrs->color_key = overlay->color_key;
+ attrs->brightness = overlay->brightness;
+ attrs->contrast = overlay->contrast;
+ attrs->saturation = overlay->saturation;
+
+ if (!IS_GEN2(dev)) {
+ attrs->gamma0 = I915_READ(OGAMC0);
+ attrs->gamma1 = I915_READ(OGAMC1);
+ attrs->gamma2 = I915_READ(OGAMC2);
+ attrs->gamma3 = I915_READ(OGAMC3);
+ attrs->gamma4 = I915_READ(OGAMC4);
+ attrs->gamma5 = I915_READ(OGAMC5);
+ }
+ } else {
+ if (attrs->brightness < -128 || attrs->brightness > 127)
+ goto out_unlock;
+ if (attrs->contrast > 255)
+ goto out_unlock;
+ if (attrs->saturation > 1023)
+ goto out_unlock;
+
+ overlay->color_key = attrs->color_key;
+ overlay->brightness = attrs->brightness;
+ overlay->contrast = attrs->contrast;
+ overlay->saturation = attrs->saturation;
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+ if (IS_GEN2(dev))
+ goto out_unlock;
+
+ if (overlay->active) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = check_gamma(attrs);
+ if (ret)
+ goto out_unlock;
+
+ I915_WRITE(OGAMC0, attrs->gamma0);
+ I915_WRITE(OGAMC1, attrs->gamma1);
+ I915_WRITE(OGAMC2, attrs->gamma2);
+ I915_WRITE(OGAMC3, attrs->gamma3);
+ I915_WRITE(OGAMC4, attrs->gamma4);
+ I915_WRITE(OGAMC5, attrs->gamma5);
+ }
+ }
+
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_i915_gem_object *reg_bo;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!HAS_OVERLAY(dev))
+ return;
+
+ overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+ if (!overlay)
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->overlay) {
+ DRM_ERROR("BUG");
+ goto out_free;
+ }
+
+ overlay->dev = dev;
+
+ reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (reg_bo == NULL)
+ goto out_free;
+ overlay->reg_bo = reg_bo;
+
+ if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ ret = i915_gem_attach_phys_object(dev, reg_bo,
+ I915_GEM_PHYS_OVERLAY_REGS,
+ PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = reg_bo->phys_obj->handle->paddr;
+ } else {
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = reg_bo->gtt_offset;
+
+ ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+ if (ret) {
+ DRM_ERROR("failed to move overlay register bo into the GTT\n");
+ goto out_unpin_bo;
+ }
+ }
+
+ /* init all values */
+ overlay->color_key = 0x0101fe;
+	overlay->brightness = UINT_MAX - 19; /* i.e. -19 stored as unsigned */
+ overlay->contrast = 75;
+ overlay->saturation = 146;
+
+ regs = intel_overlay_map_regs(overlay);
+ if (!regs)
+ goto out_unpin_bo;
+
+ (void) memset(regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(regs);
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs(overlay, regs);
+
+ dev_priv->overlay = overlay;
+ mutex_unlock(&dev->struct_mutex);
+ DRM_INFO("initialized overlay support\n");
+ return;
+
+out_unpin_bo:
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ i915_gem_object_unpin(reg_bo);
+out_free_bo:
+ drm_gem_object_unreference(&reg_bo->base);
+out_free:
+ mutex_unlock(&dev->struct_mutex);
+ kfree(overlay, sizeof(*overlay));
+ return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!dev_priv->overlay)
+ return;
+
+	/* The BOs should already have been freed by the generic code.
+	 * Furthermore, modesetting teardown happens beforehand, so the
+	 * hardware should already be off. */
+ BUG_ON(dev_priv->overlay->active);
+
+ drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+ kfree(dev_priv->overlay, sizeof(*dev_priv->overlay));
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct intel_overlay_error_state {
+ struct overlay_registers regs;
+ unsigned long base;
+ u32 dovsta;
+ u32 isr;
+};
+
+static struct overlay_registers *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ overlay->reg_bo->gtt_offset);
+
+ return regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(regs);
+}
+
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay_error_state *error;
+ struct overlay_registers __iomem *regs;
+
+ if (!overlay || !overlay->active)
+ return NULL;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ error->dovsta = I915_READ(DOVSTA);
+ error->isr = I915_READ(ISR);
+ if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ else
+ error->base = (long) overlay->reg_bo->gtt_offset;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto err;
+
+ memcpy(&error->regs, regs, sizeof(struct overlay_registers));
+ intel_overlay_unmap_regs_atomic(overlay, regs);
+
+ return error;
+
+err:
+ kfree(error, sizeof(*error));
+ return NULL;
+}
+
+void
+intel_overlay_print_error_state(struct intel_overlay_error_state *error)
+{
+ DRM_ERROR("Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+ error->dovsta, error->isr);
+ DRM_ERROR(" Register file at 0x%08lx:\n",
+ error->base);
+
+#define P(x) DRM_ERROR(" " #x ": 0x%08x\n", error->regs.x)
+ P(OBUF_0Y);
+ P(OBUF_1Y);
+ P(OBUF_0U);
+ P(OBUF_0V);
+ P(OBUF_1U);
+ P(OBUF_1V);
+ P(OSTRIDE);
+ P(YRGB_VPH);
+ P(UV_VPH);
+ P(HORZ_PH);
+ P(INIT_PHS);
+ P(DWINPOS);
+ P(DWINSZ);
+ P(SWIDTH);
+ P(SWIDTHSW);
+ P(SHEIGHT);
+ P(YRGBSCALE);
+ P(UVSCALE);
+ P(OCLRC0);
+ P(OCLRC1);
+ P(DCLRKV);
+ P(DCLRKM);
+ P(SCLRKVH);
+ P(SCLRKVL);
+ P(SCLRKEN);
+ P(OCONFIG);
+ P(OCMD);
+ P(OSTART_0Y);
+ P(OSTART_1Y);
+ P(OSTART_0U);
+ P(OSTART_0V);
+ P(OSTART_1U);
+ P(OSTART_1V);
+ P(OTILEOFF_0Y);
+ P(OTILEOFF_1Y);
+ P(OTILEOFF_0U);
+ P(OTILEOFF_0V);
+ P(OTILEOFF_1U);
+ P(OTILEOFF_1V);
+ P(FASTHSCALE);
+ P(UVSCALEV);
+#undef P
+}
+#endif
diff --git a/usr/src/uts/intel/io/i915/intel_panel.c b/usr/src/uts/intel/io/i915/intel_panel.c
new file mode 100644
index 0000000..4bc44b4
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_panel.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2010, 2013, Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "intel_drv.h"
+
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
+void
+intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode)
+{
+ struct drm_display_mode *mode, *adjusted_mode;
+ int x, y, width, height;
+
+ mode = &pipe_config->requested_mode;
+ adjusted_mode = &pipe_config->adjusted_mode;
+
+ x = y = width = height = 0;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay)
+ goto done;
+
+ switch (fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+ x = (adjusted_mode->hdisplay - width + 1)/2;
+ y = (adjusted_mode->vdisplay - height + 1)/2;
+ break;
+
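+	/*
+	 * Worked example for the aspect-preserving case below: a 1024x768
+	 * request on a 1920x1080 panel gives scaled_width 1474560 >
+	 * scaled_height 1105920, so we pillarbox: width = 1105920 / 768 =
+	 * 1440, x = (1920 - 1440 + 1) / 2 = 240, height = 1080.
+	 */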
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+ if (scaled_width > scaled_height) { /* pillar */
+ width = scaled_height / mode->vdisplay;
+ if (width & 1)
+ width++;
+ x = (adjusted_mode->hdisplay - width + 1) / 2;
+ y = 0;
+ height = adjusted_mode->vdisplay;
+ } else if (scaled_width < scaled_height) { /* letter */
+ height = scaled_width / mode->hdisplay;
+ if (height & 1)
+ height++;
+ y = (adjusted_mode->vdisplay - height + 1) / 2;
+ x = 0;
+ width = adjusted_mode->hdisplay;
+ } else {
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ }
+ }
+ break;
+
+ case DRM_MODE_SCALE_FULLSCREEN:
+ x = y = 0;
+ width = adjusted_mode->hdisplay;
+ height = adjusted_mode->vdisplay;
+ break;
+
+ default:
+ DRM_ERROR("bad panel fit mode: %d\n", fitting_mode);
+ return;
+ }
+
+done:
+ pipe_config->pch_pfit.pos = (x << 16) | y;
+ pipe_config->pch_pfit.size = (width << 16) | height;
+}
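+
+/*
+ * Worked example of the DRM_MODE_SCALE_ASPECT arithmetic above, using
+ * hypothetical timings: fixed panel mode 1920x1200, requested mode
+ * 1024x768.  scaled_width = 1920 * 768 = 1474560 and scaled_height =
+ * 1024 * 1200 = 1228800, so scaled_width > scaled_height and we
+ * pillarbox: width = 1228800 / 768 = 1600 (already even), height = 1200,
+ * x = (1920 - 1600 + 1) / 2 = 160, y = 0.  The pipe scales 1024x768 into
+ * a centered 1600x1200 window, preserving the 4:3 ratio, and
+ * pch_pfit.pos/size pack those values as (160 << 16) | 0 and
+ * (1600 << 16) | 1200.
+ */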
+
+static void
+centre_horizontally(struct drm_display_mode *mode,
+ int width)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the hsync and hblank widths constant */
+ sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->hdisplay - width + 1) / 2;
+ border += border & 1; /* make the border even */
+
+ mode->crtc_hdisplay = width;
+ mode->crtc_hblank_start = width + border;
+ mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
+
+ mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+ mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *mode,
+ int height)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the vsync and vblank widths constant */
+ sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->vdisplay - height + 1) / 2;
+
+ mode->crtc_vdisplay = height;
+ mode->crtc_vblank_start = height + border;
+ mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
+
+ mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+ mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+}
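+
+/*
+ * Example of the centring arithmetic, with hypothetical CRTC timings:
+ * hdisplay = 1920, crtc_hblank_start = 1920, crtc_hblank_end = 2080
+ * (blank_width = 160), crtc_hsync_start = 1968, crtc_hsync_end = 2000
+ * (sync_width = 32).  Centring a 1600-wide image gives border =
+ * (1920 - 1600 + 1) / 2 = 160 (already even), so crtc_hdisplay = 1600,
+ * crtc_hblank_start = 1760, crtc_hblank_end = 1920, and with sync_pos =
+ * (160 - 32 + 1) / 2 = 64 the sync pulse lands at 1824..1856.  The blank
+ * and sync widths are unchanged and crtc_htotal is untouched, so the
+ * pixel clock and refresh rate are preserved while the active area
+ * shrinks; the border fills the remaining region.
+ */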
+
+static inline u32 panel_fitter_scaling(u32 source, u32 target)
+{
+	/*
+	 * Floating-point operations are not available here, so FACTOR
+	 * defines a fixed-point scale that avoids floating-point
+	 * computation when calculating the panel scaling ratio.
+	 */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+ u32 ratio = source * FACTOR / target;
+ return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
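+
+/*
+ * Example: scaling a 768-line source onto a 1200-line panel gives
+ * ratio = 768 * 4096 / 1200 = 2621, i.e. 0.64 with 12 fractional bits
+ * (2621 / 4096 ~= 0.6399).  Note that (FACTOR * ratio + FACTOR/2) /
+ * FACTOR reduces to ratio in integer arithmetic, so the function
+ * effectively returns source * FACTOR / target.
+ */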
+
+void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config,
+ int fitting_mode)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ struct drm_display_mode *mode, *adjusted_mode;
+
+ mode = &pipe_config->requested_mode;
+ adjusted_mode = &pipe_config->adjusted_mode;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay)
+ goto out;
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ pipe_config->timings_set = true;
+
+ switch (fitting_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ centre_horizontally(adjusted_mode, mode->hdisplay);
+ centre_vertically(adjusted_mode, mode->vdisplay);
+ border = LVDS_BORDER_ENABLE;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 scaled_width = adjusted_mode->hdisplay *
+ mode->vdisplay;
+ u32 scaled_height = mode->hdisplay *
+ adjusted_mode->vdisplay;
+
+ /* 965+ is easy, it does everything in hw */
+ if (scaled_width > scaled_height)
+ pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_PILLAR;
+ else if (scaled_width < scaled_height)
+ pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != mode->hdisplay)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+ } else {
+ u32 scaled_width = adjusted_mode->hdisplay *
+ mode->vdisplay;
+ u32 scaled_height = mode->hdisplay *
+ adjusted_mode->vdisplay;
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode,
+ scaled_height /
+ mode->vdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->vdisplay != adjusted_mode->vdisplay) {
+ u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode,
+ scaled_width /
+ mode->hdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->hdisplay != adjusted_mode->hdisplay) {
+ u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else {
+				/* Aspects match, let the hw scale in both directions */
+ pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ }
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ if (mode->vdisplay != adjusted_mode->vdisplay ||
+ mode->hdisplay != adjusted_mode->hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
+ break;
+ default:
+ DRM_ERROR("bad panel fit mode: %d\n", fitting_mode);
+ return;
+ }
+
+ /* 965+ wants fuzzy fitting */
+ /* FIXME: handle multiple panels by failing gracefully */
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY);
+
+out:
+ if ((pfit_control & PFIT_ENABLE) == 0) {
+ pfit_control = 0;
+ pfit_pgm_ratios = 0;
+ }
+
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ pipe_config->gmch_pfit.control = pfit_control;
+ pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+ pipe_config->gmch_pfit.lvds_border_bits = border;
+}
+
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+ if (IS_GEN2(dev))
+ return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+ return 0;
+}
+
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+	/* Restore the CTL value if it was lost, e.g. after a GPU reset */
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2);
+ if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL2 = val;
+ } else if (val == 0) {
+ val = dev_priv->regfile.saveBLC_PWM_CTL2;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val);
+ }
+ } else {
+ val = I915_READ(BLC_PWM_CTL);
+ if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+ dev_priv->regfile.saveBLC_PWM_CTL = val;
+ if (INTEL_INFO(dev)->gen >= 4)
+ dev_priv->regfile.saveBLC_PWM_CTL2 =
+ I915_READ(BLC_PWM_CTL2);
+ } else if (val == 0) {
+ val = dev_priv->regfile.saveBLC_PWM_CTL;
+ I915_WRITE(BLC_PWM_CTL, val);
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->regfile.saveBLC_PWM_CTL2);
+ }
+ }
+
+ return val;
+}
+
+static u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ u32 max;
+
+ max = i915_read_blc_pwm_ctl(dev);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ max >>= 16;
+ } else {
+ if (INTEL_INFO(dev)->gen < 4)
+ max >>= 17;
+ else
+ max >>= 16;
+
+ if (is_backlight_combination_mode(dev))
+ max *= 0xff;
+ }
+
+ DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+ return max;
+}
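+
+/*
+ * Example (hypothetical register value): on a gen4+ non-PCH part with
+ * BLC_PWM_CTL = 0x12c812c8, the high word (the backlight modulation
+ * frequency, i.e. the maximum PWM count) gives max = 0x12c8 = 4808 PWM
+ * steps.  In combination mode the effective range is max * 0xff, since
+ * brightness is the product of the PWM duty cycle and the PCI LBPC byte
+ * (see intel_panel_actually_set_backlight() below).
+ */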
+
+static int i915_panel_invert_brightness;
+static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (i915_panel_invert_brightness < 0)
+ return val;
+
+ if (i915_panel_invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ u32 max = intel_panel_get_max_backlight(dev);
+ if (max)
+ return max - val;
+ }
+
+ return val;
+}
+
+static u32 intel_panel_get_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (INTEL_INFO(dev)->gen < 4)
+ val >>= 1;
+
+		if (is_backlight_combination_mode(dev)) {
+ u8 lbpc;
+
+ pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+ val *= lbpc;
+ }
+ }
+
+ val = intel_panel_compute_brightness(dev, val);
+
+ spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+}
+
+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ level = intel_panel_compute_brightness(dev, level);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_pch_panel_set_backlight(dev, level);
+ return;
+ }
+
+	if (is_backlight_combination_mode(dev)) {
+ u32 max = intel_panel_get_max_backlight(dev);
+ u8 lbpc;
+
+ /* we're screwed, but keep behaviour backwards compatible */
+ if (!max)
+ max = 1;
+
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+ pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL);
+ if (INTEL_INFO(dev)->gen < 4)
+ level <<= 1;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
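+
+/*
+ * Worked example of the combination-mode split above, with a
+ * hypothetical PWM range of 100 (so max = 100 * 0xff = 25500 from
+ * intel_panel_get_max_backlight()): asking for level = 12750 (50%)
+ * yields lbpc = 12750 * 0xfe / 25500 + 1 = 128 and level = 12750 / 128
+ * = 99, so the hardware is programmed with LBPC = 128 and a PWM duty
+ * cycle of 99; their product, 12672, approximates the requested level.
+ */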
+
+/* set backlight brightness to level in range [0..max] */
+void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 freq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
+ freq = intel_panel_get_max_backlight(dev);
+ if (!freq) {
+ /* we are screwed, bail out */
+ goto out;
+ }
+
+ /* scale to hardware, but be careful to not overflow */
+ if (freq < max)
+ level = level * freq / max;
+ else
+ level = freq / max * level;
+
+ dev_priv->backlight.level = level;
+// if (dev_priv->backlight.device)
+// dev_priv->backlight.device->props.brightness = level;
+
+ if (dev_priv->backlight.enabled)
+ intel_panel_actually_set_backlight(dev, level);
+out:
+ spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+}
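+
+/*
+ * Example of the range scaling above: if the caller works in a 0..255
+ * range (max = 255) and the hardware reports freq = 25500, then
+ * freq >= max, so level = (25500 / 255) * 128 = 12800 for a requested
+ * level of 128.  Doing the division first in that branch avoids
+ * overflowing 32 bits when freq is large.
+ */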
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ /*
+ * Do not disable backlight on the vgaswitcheroo path. When switching
+ * away from i915, the other client may depend on i915 to handle the
+ * backlight. This will leave the backlight on unnecessarily when
+ * another client is not activated.
+ */
+ if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ return;
+ }
+
+ spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
+ dev_priv->backlight.enabled = false;
+ intel_panel_actually_set_backlight(dev, 0);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+ I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
+
+ spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder =
+ intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
+ if (dev_priv->backlight.level == 0) {
+ dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
+// if (dev_priv->backlight.device)
+// dev_priv->backlight.device->props.brightness =
+// dev_priv->backlight.level;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+ tmp = I915_READ(reg);
+
+ /* Note that this can also get called through dpms changes. And
+ * we don't track the backlight dpms state, hence check whether
+ * we have to do anything first. */
+ if (tmp & BLM_PWM_ENABLE)
+ goto set_level;
+
+ if (INTEL_INFO(dev)->num_pipes == 3)
+ tmp &= ~BLM_PIPE_SELECT_IVB;
+ else
+ tmp &= ~BLM_PIPE_SELECT;
+
+ if (cpu_transcoder == TRANSCODER_EDP)
+ tmp |= BLM_TRANSCODER_EDP;
+ else
+ tmp |= BLM_PIPE(cpu_transcoder);
+ tmp &= ~BLM_PWM_ENABLE;
+
+ I915_WRITE(reg, tmp);
+ POSTING_READ(reg);
+ I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev) &&
+ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp |= BLM_PCH_PWM_ENABLE;
+ tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
+
+set_level:
+ /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+ * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+ * registers are set.
+ */
+ dev_priv->backlight.enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
+
+ spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+}
+
+static void intel_panel_init_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->backlight.level = intel_panel_get_backlight(dev);
+ dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
+}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
+ u32 *regs = (u32 *)dev_priv->opregion.lid_state;
+ return regs[0] & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+ switch (i915_panel_ignore_lid) {
+ case -2:
+ return connector_status_connected;
+ case -1:
+ return connector_status_disconnected;
+ default:
+ return connector_status_unknown;
+ }
+#endif
+
+ return connector_status_unknown;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+static int intel_panel_update_status(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ intel_panel_set_backlight(dev, bd->props.brightness,
+ bd->props.max_brightness);
+ return 0;
+}
+
+static int intel_panel_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ return intel_panel_get_backlight(dev);
+}
+
+static const struct backlight_ops intel_panel_bl_ops = {
+ .update_status = intel_panel_update_status,
+ .get_brightness = intel_panel_get_brightness,
+};
+
+int intel_panel_setup_backlight(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
+ unsigned long flags;
+
+ intel_panel_init_backlight(dev);
+
+ if (WARN_ON(dev_priv->backlight.device))
+ return -ENODEV;
+
+ (void) memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.brightness = dev_priv->backlight.level;
+
+ spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+ props.max_brightness = intel_panel_get_max_backlight(dev);
+ spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+ if (props.max_brightness == 0) {
+ DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
+ return -ENODEV;
+ }
+ dev_priv->backlight.device =
+ backlight_device_register("intel_backlight",
+ &connector->kdev, dev,
+ &intel_panel_bl_ops, &props);
+
+ if (IS_ERR(dev_priv->backlight.device)) {
+ DRM_ERROR("Failed to register backlight: %ld\n",
+ PTR_ERR(dev_priv->backlight.device));
+ dev_priv->backlight.device = NULL;
+ return -ENODEV;
+ }
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->backlight.device) {
+ backlight_device_unregister(dev_priv->backlight.device);
+ dev_priv->backlight.device = NULL;
+ }
+}
+#else
+int intel_panel_setup_backlight(struct drm_connector *connector)
+{
+ intel_panel_init_backlight(connector->dev);
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ return;
+}
+#endif
+
+int intel_panel_init(struct intel_panel *panel,
+			struct drm_display_mode *fixed_mode)
+{
+ panel->fixed_mode = fixed_mode;
+
+ return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+ struct intel_connector *intel_connector =
+ container_of(panel, struct intel_connector, panel);
+
+ if (panel->fixed_mode)
+ drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
diff --git a/usr/src/uts/intel/io/i915/intel_pm.c b/usr/src/uts/intel/io/i915/intel_pm.c
new file mode 100644
index 0000000..baa4c0e
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_pm.c
@@ -0,0 +1,5177 @@
+/*
+ * Copyright © 2012-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include "drmP.h"
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <sys/archsystm.h>
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
+/* FBC, or Frame Buffer Compression, is a technique employed to compress the
+ * framebuffer contents in memory, aiming at reducing the required bandwidth
+ * during in-memory transfers and, therefore, reducing power consumption.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns.
+ *
+ * FBC-related functionality can be enabled by means of the
+ * i915.i915_enable_fbc parameter.
+ */
+
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ */
+ return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 64B units */
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
+ cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+}
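+
+/*
+ * Example of the pitch encoding above (hypothetical sizes): with a
+ * framebuffer pitch of 8192 bytes that fits in the compressed buffer,
+ * cfb_pitch = (8192 / 64) - 1 = 127, since FBC_CTL takes the pitch in
+ * 64-byte units minus one.
+ */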
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+ gen6_gt_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ /* Set persistent mode for front-buffer rendering, ala X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+}
+
+static void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ if (IS_IVYBRIDGE(dev))
+ /* WaFbcDisableDpfcClockGating:ivb */
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) &
+ ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
+
+ if (IS_HASWELL(dev))
+ /* WaFbcDisableDpfcClockGating:hsw */
+ I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
+ I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
+ ~HSW_DPFC_GATING_DISABLE);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+
+ I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
+ IVB_DPFC_CTL_FENCE_EN |
+ intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ivb */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
+ /* WaFbcDisableDpfcClockGating:ivb */
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) |
+ ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
+ } else {
+ /* WaFbcAsynchFlipDisableFbcQueue:hsw */
+ I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
+ HSW_BYPASS_FBC_QUEUE);
+ /* WaFbcDisableDpfcClockGating:hsw */
+ I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
+ I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
+ HSW_DPFC_GATING_DISABLE);
+ }
+
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+
+ sandybridge_blit_fbc_update(dev);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+static void intel_fbc_work_fn(struct work_struct *arg)
+{
+ struct intel_fbc_work *work = container_of(arg, struct intel_fbc_work,
+ work);
+ struct drm_device *dev = work->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work, sizeof(struct intel_fbc_work));
+}
+
+void
+intel_fbc_work_timer(void *device)
+{
+ struct intel_fbc_work *work = (struct intel_fbc_work *)device;
+ struct drm_device *dev = work->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ queue_work(dev_priv->other_wq, &work->work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ del_timer_sync(&dev_priv->fbc_timer);
+ cancel_delayed_work(dev_priv->other_wq);
+ /* tasklet was killed before being run, clean up */
+ kfree(dev_priv->fbc_work, sizeof(struct intel_fbc_work));
+
+	/* Mark the work as no longer wanted so that if it does
+	 * wake up (because the work was already running and waiting
+	 * for our mutex), it will discover that it is no longer
+	 * necessary to run.
+	 */
+ dev_priv->fbc_work = NULL;
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
+
+ work->dev = crtc->dev;
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+
+ INIT_WORK(&work->work, intel_fbc_work_fn);
+ setup_timer(&dev_priv->fbc_timer, intel_fbc_work_timer,
+ (void *)work);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ test_set_timer(&dev_priv->fbc_timer, msecs_to_jiffies(50));
+}
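+
+/*
+ * Summary of the deferred-enable flow above: intel_enable_fbc() cancels
+ * any pending work, allocates an intel_fbc_work, and arms fbc_timer for
+ * ~50ms; the timer callback (intel_fbc_work_timer) queues the work on
+ * other_wq, and intel_fbc_work_fn() finally programs the hardware under
+ * struct_mutex, but only if the crtc still scans out the same fb.
+ */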
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @mode: mode in use
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_update_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int enable_fbc;
+ unsigned int max_hdisplay, max_vdisplay;
+
+ if (!i915_powersave)
+ return;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ list_for_each_entry(tmp_crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (intel_crtc_active(tmp_crtc) &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ goto out_disable;
+ }
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ goto out_disable;
+ }
+
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ max_hdisplay = 4096;
+ max_vdisplay = 2048;
+ } else {
+ max_hdisplay = 2048;
+ max_vdisplay = 1536;
+ }
+ if ((crtc->mode.hdisplay > max_hdisplay) ||
+ (crtc->mode.vdisplay > max_vdisplay)) {
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
+ intel_crtc->plane != 0) {
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ /*LINTED*/
+ if (in_dbg_master())
+ goto out_disable;
+
+ if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
+ intel_enable_fbc(crtc, 500);
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
+ i915_gem_stolen_cleanup_compression(dev);
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->ips.r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->ips.c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->ips.c_m = 1;
+ } else {
+ dev_priv->ips.c_m = 2;
+ }
+}
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
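+
+/*
+ * Table columns: is_desktop, is_ddr3, fsb_freq, mem_freq, then the four
+ * latency values consumed by pineview_update_wm() below (the display and
+ * cursor self-refresh latencies, with and without HPLL).  The first four
+ * columns form the lookup key used by intel_get_cxsr_latency().
+ */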
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+static void pineview_disable_cxsr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* deactivate cxsr */
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+ VALLEYVIEW_FIFO_SIZE,
+ VALLEYVIEW_MAX_WM,
+ VALLEYVIEW_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ VALLEYVIEW_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i855_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @pixel_size: display pixel size
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO-line-sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ long entries_required, wm_size;
+
+	/*
+	 * Note: we need to make sure we don't overflow for various clock &
+	 * latency values.
+	 * Clocks go from a few thousand to several hundred thousand kHz.
+	 * Latency is usually a few thousand ns.
+	 */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
+
+ DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+ wm_size = fifo_size - (entries_required + wm->guard_size);
+
+ DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > (long)wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
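+
+/*
+ * Worked example (hypothetical configuration): a 148500 kHz pixel clock
+ * at 4 bytes per pixel with latency_ns = 5000 needs
+ * (148500/1000) * 4 * 5000 / 1000 = 2960 bytes during the latency
+ * window, i.e. DIV_ROUND_UP(2960, 64) = 47 cachelines.  With a 96-entry
+ * FIFO and a guard size of 2 (as in i915_wm_info above) this leaves a
+ * watermark of 96 - (47 + 2) = 47 entries.
+ */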
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (intel_crtc_active(crtc)) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned long wm;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (!intel_crtc_active(crtc)) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
+
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
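+
+/*
+ * Example of the "small buffer" plane calculation above (hypothetical
+ * mode): clock = 108000 kHz at 4 bytes/pixel with a 5000 ns latency
+ * gives entries = (108000 * 4 / 1000) * 5000 / 1000 = 2160 bytes; with
+ * hdisplay = 1920 the tlb_miss term is negative and ignored, so with a
+ * 64-byte cacheline plane_wm = DIV_ROUND_UP(2160, 64) + guard_size =
+ * 34 + 2 = 36.
+ */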
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
+
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static bool vlv_compute_drain_latency(struct drm_device *dev,
+ int plane,
+ int *plane_prec_mult,
+ int *plane_dl,
+ int *cursor_prec_mult,
+ int *cursor_dl)
+{
+ struct drm_crtc *crtc;
+ int clock, pixel_size;
+ int entries;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (!intel_crtc_active(crtc))
+ return false;
+
+ clock = crtc->mode.clock; /* VESA DOT Clock */
+ pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
+
+ entries = (clock / 1000) * pixel_size;
+ *plane_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+ pixel_size);
+
+ entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
+ *cursor_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+ return true;
+}
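+
+/*
+ * Worked example with assumed figures: a 148.5MHz dot clock at 4
+ * bytes/pixel drains 148 * 4 = 592 bytes/us, which exceeds 256 and so
+ * selects the 32x precision multiplier; the drain latency is then
+ * (64 * 32 * 4) / 592 = 13.
+ */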
+
+/*
+ * Update drain latency registers of memory arbiter
+ *
+ * Valleyview SoC has a new memory arbiter and needs drain latency registers
+ * to be programmed. Each plane has a drain latency multiplier and a drain
+ * latency value.
+ */
+
+static void vlv_update_drain_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_prec, planea_dl, planeb_prec, planeb_dl;
+ int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+	/* Precision multiplier is either 16 or 32 */
+	int plane_prec_mult, cursor_prec_mult;
+
+ /* For plane A, Cursor A */
+ if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+ &cursor_prec_mult, &cursora_dl)) {
+ cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+ planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+ I915_WRITE(VLV_DDL1, cursora_prec |
+ (cursora_dl << DDL_CURSORA_SHIFT) |
+ planea_prec | planea_dl);
+ }
+
+ /* For plane B, Cursor B */
+ if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+ &cursor_prec_mult, &cursorb_dl)) {
+ cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+ planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+ I915_WRITE(VLV_DDL2, cursorb_prec |
+ (cursorb_dl << DDL_CURSORB_SHIFT) |
+ planeb_prec | planeb_dl);
+ }
+}
+
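+/*
+ * True when exactly one pipe bit is set: a non-zero power of two satisfies
+ * (x & (x - 1)) == 0, e.g. x = 0x2 gives 0x2 & 0x1 == 0.
+ */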
+#define single_plane_enabled(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+static void valleyview_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
+ unsigned int enabled = 0;
+
+ vlv_update_drain_latency(dev);
+
+ if (g4x_compute_wm0(dev, PIPE_A,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1 << PIPE_A;
+
+ if (g4x_compute_wm0(dev, PIPE_B,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 1 << PIPE_B;
+
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
+ I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ } else {
+ I915_WRITE(FW_BLC_SELF_VLV,
+ I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ plane_sr = cursor_sr = 0;
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, PIPE_A,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1 << PIPE_A;
+
+ if (g4x_compute_wm0(dev, PIPE_B,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 1 << PIPE_B;
+
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr)) {
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ plane_sr = cursor_sr = 0;
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i965_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+
+	/* Calc SR entries for one-plane configs */
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = ((htotal * 1000) / clock);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = (int) i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
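+
+/*
+ * Note on the rounding idiom above: (((latency_ns / line_time_us) + 1000)
+ * / 1000) is the integer form of trunc(latency / line_time) + 1; with the
+ * assumed figures of a 12000ns latency and a 14us line time,
+ * 12000 / 14 = 857, and (857 + 1000) / 1000 = 1 line.
+ */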
+
+static void i9xx_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
+ uint32_t fwater_lo;
+ uint32_t fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
+ else if (!IS_GEN2(dev))
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size, cpp,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (intel_crtc_active(crtc)) {
+ int cpp = crtc->fb->bits_per_pixel / 8;
+ if (IS_GEN2(dev))
+ cpp = 4;
+
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size, cpp,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
+	/* Calc SR entries for one-plane configs */
+ if (HAS_FW_BLC(dev) && enabled) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = (htotal * 1000) / clock;
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
+}
+
+static void i830_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ 4, latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark values is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* fbc has its own way to disable FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ /* enable FBC WM (except on ILK, where it must remain off) */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
+ }
+
+ if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark(%d) is too large(%ld), disabling wm%d+\n",
+			display_wm, display->max_wm, level);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%ld), disabling wm%d+\n",
+			cursor_wm, cursor->max_wm, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3],
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /*
+ * Spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return ironlake_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm,
+ display, cursor);
+}
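+
+/*
+ * Worked instance of the FBC formula above, with assumed figures: a
+ * display_wm of 112 on a 7680-byte line gives
+ * DIV_ROUND_UP(112 * 64, 7680) + 2 = 1 + 2 = 3 FBC lines.
+ */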
+
+static void ironlake_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, PIPE_A,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+			" plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << PIPE_A;
+ }
+
+ if (g4x_compute_wm0(dev, PIPE_B,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << PIPE_B;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled))
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /*
+ * WM3 is unsupported on ILK, probably because we don't have latency
+ * data for that power state
+ */
+}
+
+static void sandybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, PIPE_A,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+			" plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << PIPE_A;
+ }
+
+ if (g4x_compute_wm0(dev, PIPE_B,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << PIPE_B;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+	 * SNB supports 3 levels of watermark.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+	 * and disabled in descending order.
+	 */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, PIPE_A,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+			" plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << PIPE_A;
+ }
+
+ if (g4x_compute_wm0(dev, PIPE_B,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << PIPE_B;
+ }
+
+ if (g4x_compute_wm0(dev, PIPE_C,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << PIPE_C;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+	 * SNB supports 3 levels of watermark.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+	 * and disabled in descending order.
+	 */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3, note we have to correct the cursor latency */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+ !ironlake_compute_srwm(dev, 3, enabled,
+ 2 * SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pixel_rate, pfit_size;
+
+ pixel_rate = intel_crtc->config.adjusted_mode.clock;
+
+ /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
+ * adjust the pixel_rate here. */
+
+ pfit_size = intel_crtc->config.pch_pfit.size;
+ if (pfit_size) {
+ uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+
+ pipe_w = intel_crtc->config.requested_mode.hdisplay;
+ pipe_h = intel_crtc->config.requested_mode.vdisplay;
+ pfit_w = (pfit_size >> 16) & 0xFFFF;
+ pfit_h = pfit_size & 0xFFFF;
+ if (pipe_w < pfit_w)
+ pipe_w = pfit_w;
+ if (pipe_h < pfit_h)
+ pipe_h = pfit_h;
+
+ pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+ pfit_w * pfit_h);
+ }
+
+ return pixel_rate;
+}
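+
+/*
+ * Worked example with assumed figures: downscaling a 1920x1080 pipe into a
+ * 1280x720 panel fitter window scales the effective pixel rate by
+ * (1920 * 1080) / (1280 * 720) = 2.25x.
+ */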
+
+static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint64_t ret;
+
+ ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
+ ret = POS_DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
+
+ return (uint32_t)ret;
+}
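+
+/*
+ * Worked example with assumed figures, taking mem_value in 0.1us units as
+ * programmed by hsw_compute_wm_parameters(): method1 at a 148.5MHz pixel
+ * rate, 4 bytes/pixel and mem_value = 40 (4us) gives
+ * DIV_ROUND_UP(148500 * 4 * 40, 64 * 10000) + 2 = 38 + 2 = 40 blocks.
+ */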
+
+static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+ uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+ uint32_t latency)
+{
+ uint32_t ret;
+
+ ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+ ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+ ret = DIV_ROUND_UP(ret, 64) + 2;
+ return ret;
+}
+
+static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
+ uint8_t bytes_per_pixel)
+{
+ return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
+}
+
+struct hsw_pipe_wm_parameters {
+ bool active;
+ bool sprite_enabled;
+ uint8_t pri_bytes_per_pixel;
+ uint8_t spr_bytes_per_pixel;
+ uint8_t cur_bytes_per_pixel;
+ uint32_t pri_horiz_pixels;
+ uint32_t spr_horiz_pixels;
+ uint32_t cur_horiz_pixels;
+ uint32_t pipe_htotal;
+ uint32_t pixel_rate;
+};
+
+struct hsw_wm_maximums {
+ uint16_t pri;
+ uint16_t spr;
+ uint16_t cur;
+ uint16_t fbc;
+};
+
+struct hsw_lp_wm_result {
+ bool enable;
+ bool fbc_enable;
+ uint32_t pri_val;
+ uint32_t spr_val;
+ uint32_t cur_val;
+ uint32_t fbc_val;
+};
+
+struct hsw_wm_values {
+ uint32_t wm_pipe[3];
+ uint32_t wm_lp[3];
+ uint32_t wm_lp_spr[3];
+ uint32_t wm_linetime[3];
+ bool enable_fbc_wm;
+};
+
+enum hsw_data_buf_partitioning {
+ HSW_DATA_BUF_PART_1_2,
+ HSW_DATA_BUF_PART_5_6,
+};
+
+/* For both WM_PIPE and WM_LP. */
+static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+ uint32_t mem_value,
+ bool is_lp)
+{
+ uint32_t method1, method2;
+
+ /* TODO: for now, assume the primary plane is always enabled. */
+ if (!params->active)
+ return 0;
+
+ method1 = hsw_wm_method1(params->pixel_rate,
+ params->pri_bytes_per_pixel,
+ mem_value);
+
+ if (!is_lp)
+ return method1;
+
+ method2 = hsw_wm_method2(params->pixel_rate,
+ params->pipe_htotal,
+ params->pri_horiz_pixels,
+ params->pri_bytes_per_pixel,
+ mem_value);
+
+ return min(method1, method2);
+}
+
+/* For both WM_PIPE and WM_LP. */
+static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+ uint32_t mem_value)
+{
+ uint32_t method1, method2;
+
+ if (!params->active || !params->sprite_enabled)
+ return 0;
+
+ method1 = hsw_wm_method1(params->pixel_rate,
+ params->spr_bytes_per_pixel,
+ mem_value);
+ method2 = hsw_wm_method2(params->pixel_rate,
+ params->pipe_htotal,
+ params->spr_horiz_pixels,
+ params->spr_bytes_per_pixel,
+ mem_value);
+ return min(method1, method2);
+}
+
+/* For both WM_PIPE and WM_LP. */
+static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+ uint32_t mem_value)
+{
+ if (!params->active)
+ return 0;
+
+ return hsw_wm_method2(params->pixel_rate,
+ params->pipe_htotal,
+ params->cur_horiz_pixels,
+ params->cur_bytes_per_pixel,
+ mem_value);
+}
+
+/* Only for WM_LP. */
+static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+ uint32_t pri_val,
+ uint32_t mem_value)
+{
+ if (!params->active)
+ return 0;
+
+ return hsw_wm_fbc(pri_val,
+ params->pri_horiz_pixels,
+ params->pri_bytes_per_pixel);
+}
+
+static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
+ struct hsw_pipe_wm_parameters *params,
+ struct hsw_lp_wm_result *result)
+{
+ enum pipe pipe;
+ uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
+
+ for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
+ struct hsw_pipe_wm_parameters *p = &params[pipe];
+
+ pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
+ spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
+ cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
+ fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
+ }
+
+ result->pri_val = max(max(pri_val[0], pri_val[1]), pri_val[2]);
+ result->spr_val = max(max(spr_val[0], spr_val[1]), spr_val[2]);
+ result->cur_val = max(max(cur_val[0], cur_val[1]), cur_val[2]);
+ result->fbc_val = max(max(fbc_val[0], fbc_val[1]), fbc_val[2]);
+
+ if (result->fbc_val > max->fbc) {
+ result->fbc_enable = false;
+ result->fbc_val = 0;
+ } else {
+ result->fbc_enable = true;
+ }
+
+ result->enable = result->pri_val <= max->pri &&
+ result->spr_val <= max->spr &&
+ result->cur_val <= max->cur;
+ return result->enable;
+}
+
+static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
+ uint32_t mem_value, enum pipe pipe,
+ struct hsw_pipe_wm_parameters *params)
+{
+ uint32_t pri_val, cur_val, spr_val;
+
+ pri_val = hsw_compute_pri_wm(params, mem_value, false);
+ spr_val = hsw_compute_spr_wm(params, mem_value);
+ cur_val = hsw_compute_cur_wm(params, mem_value);
+
+	if (pri_val > 127)
+		DRM_ERROR("Primary WM error, mode not supported for pipe %c\n",
+			pipe_name(pipe));
+	if (spr_val > 127)
+		DRM_ERROR("Sprite WM error, mode not supported for pipe %c\n",
+			pipe_name(pipe));
+	if (cur_val > 63)
+		DRM_ERROR("Cursor WM error, mode not supported for pipe %c\n",
+			pipe_name(pipe));
+
+ return (pri_val << WM0_PIPE_PLANE_SHIFT) |
+ (spr_val << WM0_PIPE_SPRITE_SHIFT) |
+ cur_val;
+}
+
+static uint32_t
+hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+ u32 linetime, ips_linetime;
+
+ if (!intel_crtc_active(crtc))
+ return 0;
+
+	/* The watermarks are computed based on how long it takes to fill a
+	 * single row at the given clock rate, multiplied by 8.
+	 */
+ linetime = POS_DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
+ ips_linetime = POS_DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
+ intel_ddi_get_cdclk_freq(dev_priv));
+
+ return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
+ PIPE_WM_LINETIME_TIME(linetime);
+}
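+
+/*
+ * Worked example with assumed figures: a 2200-pixel htotal at a 148.5MHz
+ * clock takes ~14.8us per line, so the 1/8us-unit line time is
+ * POS_DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119.
+ */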
+
+static void hsw_compute_wm_parameters(struct drm_device *dev,
+ struct hsw_pipe_wm_parameters *params,
+ uint32_t *wm,
+ struct hsw_wm_maximums *lp_max_1_2,
+ struct hsw_wm_maximums *lp_max_5_6)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ uint64_t sskpd = I915_READ64(MCH_SSKPD);
+ enum pipe pipe;
+ int pipes_active = 0, sprites_enabled = 0;
+
+ if ((sskpd >> 56) & 0xFF)
+ wm[0] = (sskpd >> 56) & 0xFF;
+ else
+ wm[0] = sskpd & 0xF;
+ wm[1] = ((sskpd >> 4) & 0xFF) * 5;
+ wm[2] = ((sskpd >> 12) & 0xFF) * 5;
+ wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
+ wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct hsw_pipe_wm_parameters *p;
+
+ pipe = intel_crtc->pipe;
+ p = &params[pipe];
+
+ p->active = intel_crtc_active(crtc);
+ if (!p->active)
+ continue;
+
+ pipes_active++;
+
+ p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
+ p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
+ p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
+ p->cur_bytes_per_pixel = 4;
+ p->pri_horiz_pixels =
+ intel_crtc->config.requested_mode.hdisplay;
+ p->cur_horiz_pixels = 64;
+ }
+
+ list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct hsw_pipe_wm_parameters *p;
+
+ pipe = intel_plane->pipe;
+ p = &params[pipe];
+
+ p->sprite_enabled = intel_plane->wm.enable;
+ p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
+ p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
+
+ if (p->sprite_enabled)
+ sprites_enabled++;
+ }
+
+ if (pipes_active > 1) {
+ lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
+ lp_max_1_2->spr = lp_max_5_6->spr = 128;
+ lp_max_1_2->cur = lp_max_5_6->cur = 64;
+ } else {
+ lp_max_1_2->pri = sprites_enabled ? 384 : 768;
+ lp_max_5_6->pri = sprites_enabled ? 128 : 768;
+ lp_max_1_2->spr = 384;
+ lp_max_5_6->spr = 640;
+ lp_max_1_2->cur = lp_max_5_6->cur = 255;
+ }
+ lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
+}
+
+static void hsw_compute_wm_results(struct drm_device *dev,
+ struct hsw_pipe_wm_parameters *params,
+ uint32_t *wm,
+ struct hsw_wm_maximums *lp_maximums,
+ struct hsw_wm_values *results)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct hsw_lp_wm_result lp_results[4];
+ enum pipe pipe;
+ int level, max_level, wm_lp;
+
+ for (level = 1; level <= 4; level++)
+ if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
+ &lp_results[level - 1]))
+ break;
+ max_level = level - 1;
+
+	/* Clear the result before filling it in. */
+	memset(results, 0, sizeof(*results));
+
+	/* The spec says it is preferred to disable FBC WMs instead of disabling
+	 * a WM level. */
+	results->enable_fbc_wm = true;
+	for (level = 1; level <= max_level; level++) {
+		if (!lp_results[level - 1].fbc_enable) {
+			results->enable_fbc_wm = false;
+			break;
+		}
+	}
+ for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+ const struct hsw_lp_wm_result *r;
+
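+		/* With all four LP levels usable (max_level == 4), the three
+		 * WM_LP registers take levels 1, 3 and 4, skipping level 2;
+		 * otherwise WM_LPn simply takes level n. */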
+ level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
+ if (level > max_level)
+ break;
+
+ r = &lp_results[level - 1];
+ results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
+ r->fbc_val,
+ r->pri_val,
+ r->cur_val);
+ results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+ }
+
+ for_each_pipe(pipe)
+ results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
+ pipe,
+ &params[pipe]);
+
+ for_each_pipe(pipe) {
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
+ }
+}
+
+/* Find the result with the highest level enabled. Check for enable_fbc_wm in
+ * case both are at the same level. Prefer r1 in case they're the same. */
+struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+ struct hsw_wm_values *r2)
+{
+ int i, val_r1 = 0, val_r2 = 0;
+
+ for (i = 0; i < 3; i++) {
+ if (r1->wm_lp[i] & WM3_LP_EN)
+ val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
+ if (r2->wm_lp[i] & WM3_LP_EN)
+ val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+ }
+
+ if (val_r1 == val_r2) {
+ if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+ return r2;
+ else
+ return r1;
+ } else if (val_r1 > val_r2) {
+ return r1;
+ } else {
+ return r2;
+ }
+}
+
+/*
+ * The spec says we shouldn't write when we don't need to, because every
+ * write causes WMs to be re-evaluated, expending some power.
+ */
+static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
+ struct hsw_wm_values *results,
+ enum hsw_data_buf_partitioning partitioning)
+{
+ struct hsw_wm_values previous;
+ uint32_t val;
+ enum hsw_data_buf_partitioning prev_partitioning;
+ bool prev_enable_fbc_wm;
+
+ previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
+ previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
+ previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
+ previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
+ previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
+ previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
+ previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+ previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+ previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+ previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
+ previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
+ previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
+
+ prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+ HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
+
+ prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+
+ if (memcmp(results->wm_pipe, previous.wm_pipe,
+ sizeof(results->wm_pipe)) == 0 &&
+ memcmp(results->wm_lp, previous.wm_lp,
+ sizeof(results->wm_lp)) == 0 &&
+ memcmp(results->wm_lp_spr, previous.wm_lp_spr,
+ sizeof(results->wm_lp_spr)) == 0 &&
+ memcmp(results->wm_linetime, previous.wm_linetime,
+ sizeof(results->wm_linetime)) == 0 &&
+ partitioning == prev_partitioning &&
+ results->enable_fbc_wm == prev_enable_fbc_wm)
+ return;
+
+ if (previous.wm_lp[2] != 0)
+ I915_WRITE(WM3_LP_ILK, 0);
+ if (previous.wm_lp[1] != 0)
+ I915_WRITE(WM2_LP_ILK, 0);
+ if (previous.wm_lp[0] != 0)
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (previous.wm_pipe[0] != results->wm_pipe[0])
+ I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
+ if (previous.wm_pipe[1] != results->wm_pipe[1])
+ I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
+ if (previous.wm_pipe[2] != results->wm_pipe[2])
+ I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
+
+ if (previous.wm_linetime[0] != results->wm_linetime[0])
+ I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
+ if (previous.wm_linetime[1] != results->wm_linetime[1])
+ I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
+ if (previous.wm_linetime[2] != results->wm_linetime[2])
+ I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
+
+ if (prev_partitioning != partitioning) {
+ val = I915_READ(WM_MISC);
+ if (partitioning == HSW_DATA_BUF_PART_1_2)
+ val &= ~WM_MISC_DATA_PARTITION_5_6;
+ else
+ val |= WM_MISC_DATA_PARTITION_5_6;
+ I915_WRITE(WM_MISC, val);
+ }
+
+ if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+ val = I915_READ(DISP_ARB_CTL);
+ if (results->enable_fbc_wm)
+ val &= ~DISP_FBC_WM_DIS;
+ else
+ val |= DISP_FBC_WM_DIS;
+ I915_WRITE(DISP_ARB_CTL, val);
+ }
+
+ if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+ I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+ if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+ I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+ if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+ I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+
+ if (results->wm_lp[0] != 0)
+ I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+ if (results->wm_lp[1] != 0)
+ I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+ if (results->wm_lp[2] != 0)
+ I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+}
+
+static void haswell_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
+ struct hsw_pipe_wm_parameters params[3];
+ struct hsw_wm_values results_1_2, results_5_6, *best_results;
+ uint32_t wm[5];
+ enum hsw_data_buf_partitioning partitioning;
+
+ hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
+
+ hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
+ if (lp_max_1_2.pri != lp_max_5_6.pri) {
+ hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
+ &results_5_6);
+ best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+ } else {
+ best_results = &results_1_2;
+ }
+
+ partitioning = (best_results == &results_1_2) ?
+ HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
+
+ hsw_write_wm_values(dev_priv, best_results, partitioning);
+}
+
+static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size,
+ bool enable)
+{
+ struct drm_plane *plane;
+
+ list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head) {
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ if (intel_plane->pipe == pipe) {
+ intel_plane->wm.enable = enable;
+ intel_plane->wm.horiz_pixels = sprite_width + 1;
+ intel_plane->wm.bytes_per_pixel = (uint8_t)pixel_size;
+ break;
+ }
+ }
+
+ haswell_update_wm(dev);
+}
+
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (!intel_crtc_active(crtc)) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	tlb_miss = display->fifo_size * display->cacheline_size -
+		sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+ if (!clock) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_time_us = (sprite_width * 1000) / clock;
+ if (!line_time_us) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+	return *sprite_wm <= 0x3ff;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int sprite_wm, reg;
+ int ret;
+
+ if (!enable)
+ return;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
+ pipe_name(pipe));
+ return;
+ }
+
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+	DRM_DEBUG_KMS("sprite watermarks for pipe %c - %d\n", pipe_name(pipe), sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
+ pipe_name(pipe));
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
+ pipe_name(pipe));
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
+ pipe_name(pipe));
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
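+ *
+ * Worked instance of the SR formula, with assumed figures: a 2200-pixel
+ * htotal at a 148.5MHz dot clock gives a ~14.8us line time, so a 12us
+ * latency yields trunc(12 / 14.8) + 1 = 1 line; the SR watermark then
+ * covers 1 * hdisplay * bytes per pixel of FIFO, rounded up, plus the
+ * extra 2 entries for clock crossings.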
+ */
+void intel_update_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
+}
+
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size, enable);
+}
+
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+ struct drm_i915_gem_object *ctx;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ ret = i915_gem_object_pin(ctx, 4096, true, false);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+
+ return ctx;
+
+err_unpin:
+ i915_gem_object_unpin(ctx);
+err_unref:
+ drm_gem_object_unreference(&ctx->base);
+ return NULL;
+}
+
+/**
+ * Lock protecting IPS related data structures
+ */
+spinlock_t mchdev_lock;
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ ASSERT(MUTEX_HELD(&mchdev_lock));
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+static void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+
+ spin_lock_irq(&mchdev_lock);
+
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+ dev_priv->ips.fstart = fstart;
+
+ dev_priv->ips.max_delay = fstart;
+ dev_priv->ips.min_delay = fmin;
+ dev_priv->ips.cur_delay = fstart;
+
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ msleep(1);
+
+ (void) ironlake_set_drps(dev, fstart);
+
+ dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->ips.last_count2 = I915_READ(0x112f4);
+ dev_priv->ips.last_time2 = jiffies;
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ (void) ironlake_set_drps(dev, dev_priv->ips.fstart);
+ msleep(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing a rmw cycle (which might result in us clearing
+ * all limits and the gpu stuck at whatever frequency it is at atm).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+{
+ u32 limits;
+
+ limits = 0;
+
+ if (*val >= dev_priv->rps.max_delay)
+ *val = dev_priv->rps.max_delay;
+ limits |= dev_priv->rps.max_delay << 24;
+
+ /* Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency, if the down threshold expires in that window we will not
+ * receive a down interrupt. */
+ if (*val <= dev_priv->rps.min_delay) {
+ *val = dev_priv->rps.min_delay;
+ limits |= dev_priv->rps.min_delay << 16;
+ }
+
+ return limits;
+}
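+
+/*
+ * Worked example with assumed figures: max_delay = 0x22 and min_delay =
+ * 0x0a with *val already at the floor packs to
+ * (0x22 << 24) | (0x0a << 16) = 0x220a0000 for GEN6_RP_INTERRUPT_LIMITS.
+ */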
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits = gen6_rps_limits(dev_priv, &val);
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
+
+ if (val == dev_priv->rps.cur_delay)
+ return;
+
+ if (IS_HASWELL(dev))
+ I915_WRITE(GEN6_RPNSWREQ,
+ HSW_FREQUENCY(val));
+ else
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+ POSTING_READ(GEN6_RPNSWREQ);
+
+ dev_priv->rps.cur_delay = (u8)val;
+}
+
+/*
+ * Wait until the previous freq change has completed,
+ * or the timeout elapsed, and then update our notion
+ * of the current GPU frequency.
+ */
+static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+ u32 pval;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ do {
+ pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ if (time_after(jiffies, timeout)) {
+ DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
+ break;
+ }
+ udelay(10);
+ } while (pval & 1);
+
+ pval >>= 8;
+
+ if (pval != dev_priv->rps.cur_delay)
+		DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
+ dev_priv->rps.cur_delay,
+ vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
+
+ dev_priv->rps.cur_delay = (u8)pval;
+}
+
+void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen6_rps_limits(dev_priv, &val);
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
+
+ vlv_update_rps_cur_delay(dev_priv);
+
+ DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.cur_delay),
+ dev_priv->rps.cur_delay,
+ vlv_gpu_freq(dev_priv->mem_freq, val), val);
+
+ if (val == dev_priv->rps.cur_delay)
+ return;
+
+ vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+ dev_priv->rps.cur_delay = val;
+}
+
+
+static void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+ I915_WRITE(GEN6_RPNSWREQ, 1UL << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps.lock);
+
+ I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+}
+
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->rps.lock);
+ dev_priv->rps.pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps.lock);
+
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+
+ if (dev_priv->vlv_pctx) {
+ drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+ dev_priv->vlv_pctx = NULL;
+ }
+}
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+ /* Respect the kernel parameter if it is set */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /* Disable RC6 on Ironlake */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ if (IS_HASWELL(dev)) {
+ DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+ return INTEL_RC6_ENABLE;
+ }
+
+ /* snb/ivb have more than one rc6 state. */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ return INTEL_RC6_ENABLE;
+ }
+
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
+static void gen6_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ u32 rp_state_cap;
+ u32 gt_perf_status;
+ u32 rc6vids, pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
+ int rc6_mode;
+ int i, ret;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /* Clear the DBG now so we don't confuse earlier errors */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+ /* In units of 100MHz */
+ dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
+ dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->rps.cur_delay = 0;
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ /* Check if we are enabling RC6 */
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ /* We don't use those on Haswell */
+ if (!IS_HASWELL(dev)) {
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ }
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ if (IS_HASWELL(dev)) {
+ I915_WRITE(GEN6_RPNSWREQ,
+ HSW_FREQUENCY(10));
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(12));
+ } else {
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+ }
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ dev_priv->rps.max_delay << 24 |
+ dev_priv->rps.min_delay << 16);
+
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
+
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+ if (!ret) {
+ pcu_mbox = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+		if (!ret && (pcu_mbox & (1UL << 31))) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+ (dev_priv->rps.max_delay & 0xff) * 50,
+ (pcu_mbox & 0xff) * 50);
+ dev_priv->rps.hw_max = pcu_mbox & 0xff;
+ }
+ } else {
+ DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+ }
+
+ gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
+ spin_lock_irq(&dev_priv->rps.lock);
+ /* FIXME: Our interrupt enabling sequence is bonghits.
+ * dev_priv->rps.pm_iir really should be 0 here. */
+ dev_priv->rps.pm_iir = 0;
+ I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+ I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&dev_priv->rps.lock);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ if (IS_GEN6(dev) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+static void gen6_update_ring_freq(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int min_freq = 15;
+ unsigned int gpu_freq;
+ unsigned int max_ia_freq, min_ring_freq;
+ int scaling_factor = 180;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (cpu_freq == 0)
+ return;
+
+ max_ia_freq = cpu_freq;
+ DRM_INFO("CPU frequence %d MHz", max_ia_freq);
+ min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
+ /* convert DDR frequency from units of 133.3MHz to bandwidth */
+ min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
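+	/* i.e. min_ring_freq = ceil(8 * min_ring_freq / 3) */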
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->rps.max_delay - gpu_freq;
+ unsigned int ia_freq = 0, ring_freq = 0;
+
+ if (IS_HASWELL(dev)) {
+ ring_freq = (gpu_freq * 5 + 3) / 4;
+ ring_freq = max(min_ring_freq, ring_freq);
+ /* leave ia_freq as the default, chosen by cpufreq */
+ } else {
+ /* On older processors, there is no separate ring
+ * clock domain, so in order to boost the bandwidth
+ * of the ring, we need to upclock the CPU (ia_freq).
+ *
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
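+			/* hand the PCU ia_freq in 100MHz units, rounded to nearest */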
+			ia_freq = ia_freq / 100 + (((ia_freq % 100) >= 50) ? 1 : 0);
+ }
+
+ sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+ ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+ gpu_freq);
+ }
+}
+
+int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp0;
+
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+ /* Clamp to max */
+ rp0 = min(rp0, 0xea);
+
+ return rp0;
+}
+
+static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpe;
+
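+	/* RPe is fused in two parts: bits 4:0 in FMAX_FUSE_LO, the rest in FMAX_FUSE_HI */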
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+
+ return rpe;
+}
+
+int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+ return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
+}
+#if 0
+static void vlv_rps_timer_work(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ rps.vlv_work.work);
+
+ /*
+ * Timer fired, we must be idle. Drop to min voltage state.
+ * Note: we use RPe here since it should match the
+ * Vmin we were shooting for. That should give us better
+ * perf when we come back out of RC6 than if we used the
+ * min freq available.
+ */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+#endif
+static void valleyview_setup_pctx(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *pctx;
+ unsigned long pctx_paddr;
+ u32 pcbr;
+ int pctx_size = 24*1024;
+
+ pcbr = I915_READ(VLV_PCBR);
+ if (pcbr) {
+ /* BIOS set it up already, grab the pre-alloc'd space */
+ int pcbr_offset;
+
+ pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
+ pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+ pcbr_offset,
+ -1,
+ pctx_size);
+ goto out;
+ }
+
+ /*
+ * From the Gunit register HAS:
+ * The Gfx driver is expected to program this register and ensure
+ * proper allocation within Gfx stolen memory. For example, this
+	 * register should be programmed such that the PCBR range does not
+ * overlap with other ranges, such as the frame buffer, protected
+ * memory, or any other relevant ranges.
+ */
+ pctx = i915_gem_object_create_stolen(dev, pctx_size);
+ if (!pctx) {
+ DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ return;
+ }
+
+ pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+ I915_WRITE(VLV_PCBR, pctx_paddr);
+
+out:
+ dev_priv->vlv_pctx = pctx;
+}
+
+static void valleyview_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ u32 gtfifodbg, val;
+ int i;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ valleyview_setup_pctx(dev);
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+
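+	/* _MASKED_BIT_ENABLE(x) expands to (x << 16) | x: the high half is the write-enable mask */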
+ /* allows RC6 residency counter to work */
+ I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN7_RC_CTL_TO_MODE);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ dev_priv->mem_freq = 800;
+ break;
+ case 2:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 3:
+ dev_priv->mem_freq = 1333;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+ DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.cur_delay),
+ dev_priv->rps.cur_delay);
+
+ dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
+ dev_priv->rps.hw_max = dev_priv->rps.max_delay;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.max_delay),
+ dev_priv->rps.max_delay);
+
+ dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.rpe_delay),
+ dev_priv->rps.rpe_delay);
+
+ dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.min_delay),
+ dev_priv->rps.min_delay);
+
+ DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.rpe_delay),
+ dev_priv->rps.rpe_delay);
+
+ //INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
+
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
+ spin_lock_irq(&dev_priv->rps.lock);
+ WARN_ON(dev_priv->rps.pm_iir != 0);
+ I915_WRITE(GEN6_PMIMR, 0);
+ spin_unlock_irq(&dev_priv->rps.lock);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+void ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->ips.renderctx) {
+ i915_gem_object_unpin(dev_priv->ips.renderctx);
+ drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+ dev_priv->ips.renderctx = NULL;
+ }
+
+ if (dev_priv->ips.pwrctx) {
+ i915_gem_object_unpin(dev_priv->ips.pwrctx);
+ drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+ dev_priv->ips.pwrctx = NULL;
+ }
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50);
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->ips.renderctx == NULL)
+ dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->ips.pwrctx == NULL)
+ dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->ips.pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ bool was_interruptible;
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!intel_enable_rc6(dev))
+ return;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ret = ironlake_setup_rc6(dev);
+ if (ret)
+ return;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ ret = intel_ring_begin(ring, 6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ dev_priv->mm.interruptible = was_interruptible;
+ return;
+ }
+
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_advance(ring);
+
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush; combined with the MI_FLUSH above, it should
+	 * be safe to assume that renderctx is valid.
+	 */
+ ret = intel_ring_idle(ring);
+ dev_priv->mm.interruptible = was_interruptible;
+ if (ret) {
+ DRM_ERROR("failed to enable ironlake power savings\n");
+ ironlake_teardown_rc6(dev);
+ return;
+ }
+
+ I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
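+	/* freq appears to be in kHz: div * 133.333MHz base / (2^post * pre) */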
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+static void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
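+		/*
+		 * Scale vid^2 * freq into a u8 weight; 127*127*900 appears to
+		 * be the full-scale reference (max VID 0x7f at 900MHz).
+		 */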
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = (u8)val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
+void intel_disable_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Interrupts should be disabled already to avoid re-arming. */
+	/* FIXME: i915_quiesce */
+// WARN_ON(dev->irq_enabled);
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_disable_drps(dev);
+ ironlake_disable_rc6(dev);
+ } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+ del_timer_sync(&dev_priv->rps.delayed_resume_timer);
+// if (IS_VALLEYVIEW(dev))
+// cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
+ mutex_lock(&dev_priv->rps.hw_lock);
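+		/* NB: unreachable in this port given the !IS_VALLEYVIEW guard above */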
+ if (IS_VALLEYVIEW(dev))
+ valleyview_disable_rps(dev);
+ else
+ gen6_disable_rps(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ }
+}
+
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ rps.delayed_resume_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ if (IS_VALLEYVIEW(dev)) {
+ valleyview_enable_rps(dev);
+ } else {
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void
+intel_gen6_powersave_work_timer(void *device)
+{
+ struct drm_device *dev = (struct drm_device *)device;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ (void) queue_work(dev_priv->wq, &dev_priv->rps.delayed_resume_work);
+}
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ /*
+ * PCU communication is slow and this doesn't need to be
+ * done at any specific time, so do this out of our fast path
+ * to make resume and init faster.
+ */
+ test_set_timer(&dev_priv->rps.delayed_resume_timer, DRM_HZ);
+ }
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void g4x_disable_trickle_feed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+}
+
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+ /* Required for FBC */
+ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh
+ * The bit 22/21 of 0x42004
+ * The bit 5 of 0x42020
+ * The bit 15 of 0x45000
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+	 * According to the hardware documentation, the following bits
+ * should be set unconditionally in order to enable FBC.
+ * The bit 22 of 0x42000
+ * The bit 22 of 0x42004
+ * The bit 7,8,9 of 0x42020.
+ */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ }
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ g4x_disable_trickle_feed(dev);
+
+ ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t val;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* The below fixes the weird display corruption, a few pixels shifted
+ * downward, on (only) LVDS of some HP laptops with IVY.
+ */
+ for_each_pipe(pipe) {
+ val = I915_READ(TRANS_CHICKEN2(pipe));
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ if (dev_priv->vbt.fdi_rx_polarity_inverted)
+ val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
+ val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
+ I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ }
+ /* WADP0ClockGatingDisable */
+ for_each_pipe(pipe) {
+ I915_WRITE(TRANS_CHICKEN1(pipe),
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+ }
+}
+
+static void gen6_check_mch_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ tmp = I915_READ(MCH_SSKPD);
+ if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
+ DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
+ DRM_INFO("This can cause pipe underruns and display issues.\n");
+ DRM_INFO("Please upgrade your BIOS to fix this.\n");
+ }
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ /* WaDisableHiZPlanesWhenMSAAEnabled */
+ I915_WRITE(_3D_CHICKEN,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+ /* WaSetupGtModeTdRowDispatch */
+ if (IS_SNB_GT1(dev))
+ I915_WRITE(GEN6_GT_MODE,
+ _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* Bspec says we need to always set all mask bits. */
+ I915_WRITE(_3D_CHICKEN3, (0xFFFFUL << 16) |
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+
+ /*
+ * According to the spec the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * The bit21 and bit22 of 0x42000
+ * The bit21 and bit22 of 0x42004
+ * The bit5 and bit7 of 0x42020
+ * The bit14 of 0x70180
+ * The bit14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE_D,
+ I915_READ(ILK_DSPCLK_GATE_D) |
+ ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
+ ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ g4x_disable_trickle_feed(dev);
+
+ /* The default value should be 0x200 according to docs, but the two
+ * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
+ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffffUL));
+ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+ cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
+}
+
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+ uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+ reg &= ~GEN7_FF_SCHED_MASK;
+ reg |= GEN7_FF_TS_SCHED_HW;
+ reg |= GEN7_FF_VS_SCHED_HW;
+ reg |= GEN7_FF_DS_SCHED_HW;
+
+ if (IS_HASWELL(dev_priv->dev))
+ reg &= ~GEN7_FF_VS_REF_CNT_FFME;
+
+ I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * TODO: this bit should only be enabled when really needed, then
+ * disabled when not needed anymore in order to save power.
+ */
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ I915_WRITE(SOUTH_DSPCLK_GATE_D,
+ I915_READ(SOUTH_DSPCLK_GATE_D) |
+ PCH_LP_PARTITION_LEVEL_DISABLE);
+
+ /* WADPOClockGatingDisable:hsw */
+ I915_WRITE(_TRANSA_CHICKEN1,
+ I915_READ(_TRANSA_CHICKEN1) |
+ TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+}
+
+static void lpt_suspend_hw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
+
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+}
+
+static void haswell_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ g4x_disable_trickle_feed(dev);
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ /* WaSwitchSolVfFArbitrationPriority:hsw */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+ /* WaRsPkgCStateDisplayPMReq:hsw */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+
+ lpt_init_clock_gating(dev);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t snpcr;
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* WaDisablePSDDualDispatchEnable */
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+ else
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+ _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+ if (IS_IVB_GT1(dev))
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ else
+ I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ g4x_disable_trickle_feed(dev);
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ /* WaVSRefCountFullforceMissDisable:ivb */
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= GEN6_MBC_SNPCR_MED;
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ if (!HAS_PCH_NOP(dev))
+ cpt_init_clock_gating(dev);
+
+ gen6_check_mch_setup(dev);
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableEarlyCull */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+ /* WaDisableBackToBackFlipFix */
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+ /* WaForceL3Serialization */
+ I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+ ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /* WaDisableDopClockGating */
+ I915_WRITE(GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ /* WaMbcDriverBootEnable */
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+ GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /*
+ * On ValleyView, the GUnit needs to signal the GT
+ * when flip and other events complete. So enable
+ * all the GUnit->GT interrupts here
+ */
+ I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
+
+ /* Conservative clock gating settings for now */
+ I915_WRITE(0x9400, 0xffffffff);
+ I915_WRITE(0x9404, 0xffffffff);
+ I915_WRITE(0x9408, 0xffffffff);
+ I915_WRITE(0x940c, 0xffffffff);
+ I915_WRITE(0x9410, 0xffffffff);
+ I915_WRITE(0x9414, 0xffffffff);
+ I915_WRITE(0x9418, 0xffffffff);
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate;
+
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+ /* WaDisableRenderCachePipelinedFlush */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+ g4x_disable_trickle_feed(dev);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+
+ if (IS_PINEVIEW(dev))
+ I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.init_clock_gating(dev);
+}
+
+void intel_suspend_hw(struct drm_device *dev)
+{
+ if (HAS_PCH_LPT(dev))
+ lpt_suspend_hw(dev);
+}
+
+/**
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+bool intel_display_power_enabled(struct drm_device *dev,
+ enum intel_display_power_domain domain)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_POWER_WELL(dev))
+ return true;
+
+ switch (domain) {
+ case POWER_DOMAIN_PIPE_A:
+ case POWER_DOMAIN_TRANSCODER_EDP:
+ return true;
+ case POWER_DOMAIN_PIPE_B:
+ case POWER_DOMAIN_PIPE_C:
+ case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+ case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+ case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+ case POWER_DOMAIN_TRANSCODER_A:
+ case POWER_DOMAIN_TRANSCODER_B:
+ case POWER_DOMAIN_TRANSCODER_C:
+ return I915_READ(HSW_PWR_WELL_DRIVER) ==
+ (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
+ default:
+ BUG();
+ return false;
+ }
+}
+
+static void __intel_set_power_well(struct drm_device *dev, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool is_enabled, enable_requested;
+ uint32_t tmp;
+
+ tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+ is_enabled = tmp & HSW_PWR_WELL_STATE;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE;
+
+ if (enable) {
+ if (!enable_requested)
+ I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
+
+ if (!is_enabled) {
+ DRM_DEBUG_KMS("Enabling power well\n");
+ if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_STATE), 20))
+ DRM_ERROR("Timeout enabling power well\n");
+ }
+ } else {
+ if (enable_requested) {
+ unsigned long irqflags;
+ enum pipe p;
+
+ I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
+
+ DRM_DEBUG_KMS("Requesting to disable the power well\n");
+ /*
+ * After this, the registers on the pipes that are part
+ * of the power well will become zero, so we have to
+ * adjust our counters according to that.
+ *
+ * FIXME: Should we do this in general in
+ * drm_vblank_post_modeset?
+ */
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for_each_pipe(p)
+ if (p != PIPE_A)
+ dev->last_vblank[p] = 0;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+ }
+}
+
+static struct i915_power_well *hsw_pwr;
+
+/* Display audio driver power well request */
+void i915_request_power_well(void)
+{
+ if (!hsw_pwr)
+ return;
+
+ spin_lock_irq(&hsw_pwr->lock);
+ if (!hsw_pwr->count++ &&
+ !hsw_pwr->i915_request)
+ __intel_set_power_well(hsw_pwr->device, true);
+ spin_unlock_irq(&hsw_pwr->lock);
+}
+
+/* Display audio driver power well release */
+void i915_release_power_well(void)
+{
+ if (!hsw_pwr)
+ return;
+
+ spin_lock_irq(&hsw_pwr->lock);
+ WARN_ON(!hsw_pwr->count);
+ if (!--hsw_pwr->count &&
+ !hsw_pwr->i915_request)
+ __intel_set_power_well(hsw_pwr->device, false);
+ spin_unlock_irq(&hsw_pwr->lock);
+}
+
+int i915_init_power_well(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ hsw_pwr = &dev_priv->power_well;
+
+ hsw_pwr->device = dev;
+ spin_lock_init(&hsw_pwr->lock);
+ hsw_pwr->count = 0;
+
+ return 0;
+}
+
+void i915_remove_power_well(struct drm_device *dev)
+{
+ hsw_pwr = NULL;
+}
+
+void intel_set_power_well(struct drm_device *dev, bool enable)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_well *power_well = &dev_priv->power_well;
+
+ if (!HAS_POWER_WELL(dev))
+ return;
+
+ if (!i915_disable_power_well && !enable)
+ return;
+
+ spin_lock_irq(&power_well->lock);
+ power_well->i915_request = enable;
+
+ /* only reject "disable" power well request */
+ if (power_well->count && !enable) {
+ spin_unlock_irq(&power_well->lock);
+ return;
+ }
+
+ __intel_set_power_well(dev, enable);
+ spin_unlock_irq(&power_well->lock);
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+void intel_init_power_well(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_POWER_WELL(dev))
+ return;
+
+ /* For now, we need the power well to be always enabled. */
+ intel_set_power_well(dev, true);
+
+ /* We're taking over the BIOS, so clear any requests made by it since
+ * the driver is in charge now. */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+ I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+/* Set up chip specific power management-related functions */
+void intel_init_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ dev_priv->display.enable_fbc =
+ gen7_enable_fbc;
+ else
+ dev_priv->display.enable_fbc =
+ ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_CRESTLINE(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* For cxsr */
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_GEN5(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = ivybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ } else if (IS_HASWELL(dev)) {
+ if (I915_READ64(MCH_SSKPD)) {
+ dev_priv->display.update_wm = haswell_update_wm;
+ dev_priv->display.update_sprite_wm =
+ haswell_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ valleyview_init_clock_gating;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.update_wm = g4x_update_wm;
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.update_wm = i965_update_wm;
+ if (IS_CRESTLINE(dev))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ } else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_I865G(dev)) {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ } else {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ u32 gt_thread_status_mask;
+
+ if (IS_HASWELL(dev_priv->dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
+ */
+ if (wait_for_atomic((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 1))
+ DRM_INFO("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_INFO("Timed out waiting for forcewake old ack to clear.\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_INFO("Timed out waiting for forcewake to ack request.\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffffUL));
+ POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_INFO("Timed out waiting for forcewake old ack to clear.\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+	/* something from same cacheline, but !FORCEWAKE_MT */
+	POSTING_READ(ECOBUS);
+
+ if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_INFO("Timed out waiting for forcewake to ack request.\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (dev_priv->forcewake_count++ == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
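+
+/*
+ * Usage sketch:
+ *	gen6_gt_force_wake_get(dev_priv);
+ *	... MMIO accesses that must not see the GT powered down ...
+ *	gen6_gt_force_wake_put(dev_priv);
+ * forcewake_count makes nested get/put pairs safe.
+ */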
+
+void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+ gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+ if (gtfifodbg & GT_FIFO_CPU_ERROR_MASK) {
+ DRM_ERROR("MMIO read or write has been dropped %x\n", gtfifodbg);
+ I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+ }
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+ POSTING_READ(ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+ POSTING_READ(ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (--dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
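+/*
+ * Ensure the GT FIFO has room beyond the reserved entries before an MMIO
+ * write is posted: poll GT_FIFO_FREE_ENTRIES (up to 500 * 10us) and
+ * return nonzero if it never drains.
+ */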
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+ if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
+ ++ret;
+ WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
+ }
+ dev_priv->gt_fifo_count = fifo;
+ }
+ dev_priv->gt_fifo_count--;
+
+ return ret;
+}
+
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffffUL));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+ if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* The below doubles as a POSTING_READ */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void intel_gt_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ if (INTEL_INFO(dev)->gen >= 6)
+ intel_disable_gt_powersave(dev);
+}
+
+void intel_gt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->gt.force_wake_get = vlv_force_wake_get;
+ dev_priv->gt.force_wake_put = vlv_force_wake_put;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->gt.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
+ } else if (IS_GEN6(dev)) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+ }
+}
+
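+/*
+ * Solaris port note: a one-shot timer (intel_gen6_powersave_work_timer,
+ * armed in intel_enable_gt_powersave) queues delayed_resume_work here in
+ * place of Linux's delayed work.
+ */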
+void intel_pm_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_WORK(&dev_priv->rps.delayed_resume_work, intel_gen6_powersave_work);
+ setup_timer(&dev_priv->rps.delayed_resume_timer, intel_gen6_powersave_work_timer,
+ (void *)dev);
+}
+
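+/*
+ * PCODE mailbox protocol (as implemented below): write the payload to
+ * GEN6_PCODE_DATA, write the command plus GEN6_PCODE_READY to
+ * GEN6_PCODE_MAILBOX, then poll until hardware clears the READY bit;
+ * for reads the reply is then in GEN6_PCODE_DATA.
+ */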
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, *val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ *val = I915_READ(GEN6_PCODE_DATA);
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
+
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, val);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500)) {
+ DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+ return -ETIMEDOUT;
+ }
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+
+ return 0;
+}
+
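+/*
+ * vlv_gpu_freq() maps a Punit frequency opcode to MHz as
+ * (val - 0xbd) * mult + base, with mult/base keyed off the DDR speed;
+ * vlv_freq_opcode() below is the approximate (integer-math) inverse,
+ * clamped to 0xea.
+ */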
+int vlv_gpu_freq(int ddr_freq, int val)
+{
+ int mult, base;
+
+ switch (ddr_freq) {
+ case 800:
+ mult = 20;
+ base = 120;
+ break;
+ case 1066:
+ mult = 22;
+ base = 133;
+ break;
+ case 1333:
+ mult = 21;
+ base = 125;
+ break;
+ default:
+ return -1;
+ }
+
+ return ((val - 0xbd) * mult) + base;
+}
+
+int vlv_freq_opcode(int ddr_freq, int val)
+{
+ int mult, base;
+
+ switch (ddr_freq) {
+ case 800:
+ mult = 20;
+ base = 120;
+ break;
+ case 1066:
+ mult = 22;
+ base = 133;
+ break;
+ case 1333:
+ mult = 21;
+ base = 125;
+ break;
+ default:
+ return -1;
+ }
+
+ val /= mult;
+ val -= base / mult;
+ val += 0xbd;
+
+ if (val > 0xea)
+ val = 0xea;
+
+ return val;
+}
+
diff --git a/usr/src/uts/intel/io/i915/intel_ringbuffer.c b/usr/src/uts/intel/io/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..5cde51d
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_ringbuffer.c
@@ -0,0 +1,2091 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2008-2010, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao<haihao.xiang@intel.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+#include "intel_drv.h"
+
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
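+/*
+ * Bytes free between head and tail, holding back I915_RING_FREE_SPACE
+ * so the head and tail pointers never land on the same cacheline (see
+ * the note in intel_ringbuffer.h); negative results wrap modulo the
+ * ring size.
+ */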
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+ if (space < 0)
+ space += ring->size;
+ return space;
+}
+
+static int
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+ int ret;
+
+ cmd = MI_FLUSH;
+ if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+ cmd |= MI_NO_WRITE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ struct drm_device *dev = ring->dev;
+ u32 cmd;
+ int ret;
+
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_GEN4(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6. From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ * "1 of the following must also be set:
+ * - Render Target Cache Flush Enable ([12] of DW1)
+ * - Depth Cache Flush Enable ([0] of DW1)
+ * - Stall at Pixel Scoreboard ([1] of DW1)
+ * - Depth Stall ([13] of DW1)
+ * - Post-Sync Operation ([13] of DW1)
+ * - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it. Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either. Notify enable is IRQs, which aren't
+ * really our business. That leaves only stall at scoreboard.
+ */
+static int
+intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0); /* low dword */
+ intel_ring_emit(ring, 0); /* high dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen6_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /* Force SNB workarounds for PIPE_CONTROL flushes */
+ ret = intel_emit_post_sync_nonzero_flush(ring);
+ if (ret)
+ return ret;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0); /* lower dword */
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
+{
+ int ret;
+
+ if (!ring->fbc_dirty)
+ return 0;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+ intel_ring_emit(ring, MI_NOOP);
+ /* WaFbcNukeOn3DBlt:ivb/hsw */
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, MSG_FBC_REND_STATE);
+ intel_ring_emit(ring, value);
+ intel_ring_advance(ring);
+
+ ring->fbc_dirty = false;
+ return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains, u32 flush_domains)
+{
+ u32 flags = 0;
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed.
+ *
+ * Workaround: 4th PIPE_CONTROL command (except the ones with only
+ * read-cache invalidate bits set) must have the CS_STALL bit set. We
+ * don't try to be clever and just set it unconditionally.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ /* Just flush everything. Experiments have shown that reducing the
+ * number of bits based on the write domains has little performance
+ * impact.
+ */
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ /* Workaround: we must issue a pipe_control with CS-stall bit
+ * set before a pipe_control command that has the state cache
+ * invalidate bit set. */
+ gen7_render_ring_cs_stall_wa(ring);
+ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ if (flush_domains)
+ return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
+
+ return 0;
+}
+
+static void ring_write_tail(struct intel_ring_buffer *ring,
+ u32 value)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ I915_WRITE_TAIL(ring, value);
+}
+
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
+ RING_ACTHD(ring->mmio_base) : ACTHD;
+
+ return I915_READ(acthd_reg);
+}
+
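+/*
+ * Point HWS_PGA at the DMA-allocated status page. On gen4+ the shift
+ * and mask below place physical address bits [35:32] into register
+ * bits [7:4], extending the reachable address range.
+ */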
+static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 addr;
+
+ addr = dev_priv->status_page_dmah->paddr;
+ if (INTEL_INFO(ring->dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->paddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+}
+
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
+ u32 head;
+
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
+ if (I915_NEED_GFX_HWS(dev))
+ intel_ring_setup_status_page(ring);
+ else
+ ring_setup_phys_status_page(ring);
+
+ /* Stop the ring if it's running. */
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(ring, 0);
+
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ /* G45 ring initialization fails to reset head to zero */
+ if (head != 0) {
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+
+ I915_WRITE_HEAD(ring, 0);
+
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
+ }
+
+ /* Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values. */
+ I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_CTL(ring,
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
+ else {
+ ring->head = I915_READ_HEAD(ring);
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ring->space = ring_space(ring);
+ ring->last_retired_head = 0xffffffff;
+ }
+
+ (void)memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+
+out:
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_put(dev_priv);
+
+ return ret;
+}
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (ring->private)
+ return 0;
+
+ pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ ret = i915_gem_object_pin(obj, 4096, true, false);
+ if (ret)
+ goto err_unref;
+
+ pc->gtt_offset = obj->gtt_offset;
+ /*LINTED E_BAD_PTR_CAST_ALIGN*/
+ pc->cpu_page = (u32*) obj->page_list[0];
+ if (pc->cpu_page == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+ ring->name, pc->gtt_offset);
+
+ pc->obj = obj;
+ ring->private = pc;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ kfree(pc, sizeof(*pc));
+ return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ struct drm_i915_gem_object *obj;
+
+ obj = pc->obj;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ kfree(pc, sizeof(*pc));
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
+
+ if (INTEL_INFO(dev)->gen > 3)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ *
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+ */
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ /* Required for the hardware to program scanline values for waiting */
+ if (INTEL_INFO(dev)->gen == 6)
+ I915_WRITE(GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_GEN6(dev)) {
+ /* From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ /* This is not explicitly set for GEN6, so read the register.
+ * see intel_ring_mi_set_context() for why we care.
+ * TODO: consider explicitly setting the bit for GEN5
+ */
+ ring->itlb_before_ctx_switch =
+ !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+ if (HAS_L3_GPU_CACHE(dev))
+ I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+
+ return ret;
+}
+
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!ring->private)
+ return;
+
+ if (HAS_BROKEN_CS_TLB(dev))
+ drm_gem_object_unreference(to_gem_object(ring->private));
+
+ if (INTEL_INFO(dev)->gen >= 5)
+ cleanup_pipe_control(ring);
+
+ ring->private = NULL;
+}
+
+static void
+update_mboxes(struct intel_ring_buffer *ring,
+ u32 mmio_offset)
+{
+/* NB: In order to be able to do semaphore MBOX updates for varying number
+ * of rings, it's easiest if we round up each individual update to a
+ * multiple of 2 (since ring updates must always be a multiple of 2)
+ * even though the actual update only requires 3 dwords.
+ */
+#define MBOX_UPDATE_DWORDS 4
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, mmio_offset);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, MI_NOOP);
+}
+
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @ring - ring that is adding a request
+ * @seqno - return seqno stuck into the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+static int
+gen6_add_request(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *useless;
+ int i, ret;
+
+ ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
+ MBOX_UPDATE_DWORDS) +
+ 4);
+ if (ret)
+ return ret;
+#undef MBOX_UPDATE_DWORDS
+
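+ /* Signal every other ring; a ring's own slot in signal_mbox[] is
+ * GEN6_NOSYNC and is skipped below.
+ */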
+ for_each_ring(useless, dev_priv, i) {
+ u32 mbox_reg = ring->signal_mbox[i];
+ if (mbox_reg != GEN6_NOSYNC)
+ update_mboxes(ring, mbox_reg);
+ }
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
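+/*
+ * After a seqno wrap, mailboxes may still hold pre-wrap values that are
+ * numerically larger than any new seqno; gen6_ring_sync() uses this
+ * check to replace such semaphore waits with no-ops.
+ */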
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->last_seqno < seqno;
+}
+
+/**
+ * intel_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter - ring that is waiting
+ * @signaller - ring which has, or will signal
+ * @seqno - seqno which the waiter will block on
+ */
+static int
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
+{
+ int ret;
+ u32 dw1 = MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_COMPARE |
+ MI_SEMAPHORE_REGISTER;
+
+ /* Throughout all of the GEM code, seqno passed implies our current
+ * seqno is >= the last seqno executed. However for hardware the
+ * comparison is strictly greater than.
+ */
+ seqno -= 1;
+
+ WARN_ON(signaller->semaphore_register[waiter->id] ==
+ MI_SEMAPHORE_SYNC_INVALID);
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
+ /* If seqno wrap happened, omit the wait with no-ops */
+ if (!i915_gem_has_seqno_wrapped(waiter->dev, seqno)) {
+ intel_ring_emit(waiter,
+ dw1 |
+ signaller->semaphore_register[waiter->id]);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter, 0);
+ intel_ring_emit(waiter, MI_NOOP);
+ } else {
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ intel_ring_emit(waiter, MI_NOOP);
+ }
+ intel_ring_advance(waiter);
+
+ return 0;
+}
+
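+/*
+ * Emit a depth-stalling PIPE_CONTROL with a qword write to the given
+ * scratch address; pc_render_add_request() uses this to flush the six
+ * PIPE_NOTIFY buffers to separate cachelines before the interrupt.
+ */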
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
+do { \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
+} while (0)
+
+static int
+pc_render_add_request(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to workaround the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ /* Workaround to force correct ordering between irq and seqno writes on
+ * ivb (and maybe also on snb) by reading from a CS register (like
+ * ACTHD) before reading the status page. */
+ if (!lazy_coherency)
+ (void) intel_ring_get_active_head(ring);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+ struct pipe_control *pc = ring->private;
+ return pc->cpu_page[0];
+}
+
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct pipe_control *pc = ring->private;
+ pc->cpu_page[0] = seqno;
+}
+
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount.gt++ == 0) {
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount.gt == 0) {
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount.gt++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount.gt == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount.gt++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount.gt == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 mmio = 0;
+
+ /* The ring status page addresses are no longer next to the rest of
+ * the ring registers as of gen7.
+ */
+ if (IS_GEN7(dev)) {
+ switch (ring->id) {
+ case RCS:
+ mmio = RENDER_HWS_PGA_GEN7;
+ break;
+ case BCS:
+ mmio = BLT_HWS_PGA_GEN7;
+ break;
+ case VCS:
+ mmio = BSD_HWS_PGA_GEN7;
+ break;
+ case VECS:
+ mmio = VEBOX_HWS_PGA_GEN7;
+ break;
+ }
+ } else if (IS_GEN6(ring->dev)) {
+ mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else {
+ mmio = RING_HWS_PGA(ring->mmio_base);
+ }
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
+
+ /* Flush the TLB for this page */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
+}
+
+static int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+static int
+i9xx_add_request(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, ring->outstanding_lazy_request);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ /* It looks like we need to prevent the gt from suspending while waiting
+ * for a notify irq, otherwise irqs seem to get lost on at least the
+ * blt/bsd rings on ivb. */
+ gen6_gt_force_wake_get(dev_priv);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount.gt++ == 0) {
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring,
+ ~(ring->irq_enable_mask |
+ GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+ else
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount.gt == 0) {
+ if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring,
+ ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ else
+ I915_WRITE_IMR(ring, ~0);
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+static bool
+hsw_vebox_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ if (ring->irq_refcount.pm++ == 0) {
+ u32 pm_imr = I915_READ(GEN6_PMIMR);
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
+ POSTING_READ(GEN6_PMIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+ return true;
+}
+
+static void
+hsw_vebox_put_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return;
+
+ spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ if (--ring->irq_refcount.pm == 0) {
+ u32 pm_imr = I915_READ(GEN6_PMIMR);
+ I915_WRITE_IMR(ring, ~0);
+ I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
+ POSTING_READ(GEN6_PMIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+}
+
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 length,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ MI_BATCH_GTT |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
+static int
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ struct drm_i915_gem_object *obj = ring->private;
+ u32 cs_offset = obj->gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+ /* Blit the batch (which has now all relocs applied) to the stable batch
+ * scratch bo area (so that the CS never stumbles over its tlb
+ * invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
+
+static int
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static void cleanup_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = ring->status_page.obj;
+ if (obj == NULL)
+ return;
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ ring->status_page.obj = NULL;
+}
+
+static int init_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
+
+ ret = i915_gem_object_pin(obj, 4096, true, false);
+ if (ret != 0) {
+ goto err_unref;
+ }
+
+ ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.page_addr = obj->page_list[0];
+ if (ring->status_page.page_addr == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+ ring->status_page.obj = obj;
+ (void) memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ ring->name, ring->status_page.gfx_addr);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ return ret;
+}
+
+static int init_phys_status_page(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (!dev_priv->status_page_dmah) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff, 1);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+ }
+
+ ring->status_page.page_addr = (void *) dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ ring->size = 32 * PAGE_SIZE;
+ memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
+
+ DRM_INIT_WAITQUEUE(&ring->irq_queue, DRM_INTR_PRI(dev));
+
+ if (I915_NEED_GFX_HWS(dev)) {
+ ret = init_status_page(ring);
+ if (ret)
+ return ret;
+ } else {
+ BUG_ON(ring->id != RCS);
+ ret = init_phys_status_page(ring);
+ if (ret)
+ return ret;
+ }
+
+ obj = NULL;
+ if (!HAS_LLC(dev))
+ obj = i915_gem_object_create_stolen(dev, ring->size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, ring->size);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto err_hws;
+ }
+
+ ring->obj = obj;
+
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+ if (ret)
+ goto err_unref;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+ ring->map.size = ring->size;
+ ring->map.offset = dev->agp_aperbase + obj->gtt_offset;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap(&ring->map, dev);
+ if (ring->map.handle == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ ring->virtual_start = ring->map.handle;
+ ret = ring->init(ring);
+ if (ret)
+ goto err_unmap;
+
+ /* Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ring->effective_size -= 128;
+
+ return 0;
+
+err_unmap:
+ drm_core_ioremapfree(&ring->map, dev);
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+ ring->obj = NULL;
+err_hws:
+ cleanup_status_page(ring);
+ return ret;
+}
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ if (ring->obj == NULL)
+ return;
+
+ /* Disable the ring buffer. The ring must be idle at this point */
+ dev_priv = ring->dev->dev_private;
+ ret = intel_ring_idle(ring);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
+ I915_WRITE_CTL(ring, 0);
+
+ drm_core_ioremapfree(&ring->map, ring->dev);
+
+ i915_gem_object_unpin(ring->obj);
+ drm_gem_object_unreference(&ring->obj->base);
+ ring->obj = NULL;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+ cleanup_status_page(ring);
+}
+
+static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret;
+
+ ret = i915_wait_seqno(ring, seqno);
+ if (!ret)
+ i915_gem_retire_requests_ring(ring);
+
+ return ret;
+}
+
+static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_i915_gem_request *request;
+ u32 seqno = 0;
+ int ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ if (ring->last_retired_head != 0xffffffff) {
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = 0xffffffff;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
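+ /* Scan the outstanding requests for the oldest one whose retirement
+ * frees at least n bytes. (The Solaris list_for_each_entry() takes
+ * the element type as an explicit argument.)
+ */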
+ list_for_each_entry(request, struct drm_i915_gem_request, &ring->request_list, list) {
+ int space;
+
+ if (request->tail == 0xffffffff)
+ continue;
+
+ space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
+ if (space < 0)
+ space += ring->size;
+ if (space >= n) {
+ seqno = request->seqno;
+ break;
+ }
+
+ /* Consume this request in case we need more space than
+ * is available and so need to prevent a race between
+ * updating last_retired_head and direct reads of
+ * I915_RING_HEAD. It also provides a nice sanity check.
+ */
+ request->tail = 0xffffffff;
+ }
+
+ if (seqno == 0)
+ return -ENOSPC;
+
+ ret = intel_ring_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ if (ring->last_retired_head == 0xffffffff) {
+ WARN_ON(ring->last_retired_head == 0xffffffff);
+ return -ENOSPC;
+ }
+
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = 0xffffffff;
+ ring->space = ring_space(ring);
+ if (ring->space < n) {
+ WARN_ON(ring->space < n);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long end;
+ int ret;
+
+ ret = intel_ring_wait_request(ring, n);
+ if (ret != -ENOSPC)
+ return ret;
+
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * DRM_HZ;
+
+ do {
+ ring->head = I915_READ_HEAD(ring);
+ ring->space = ring_space(ring);
+ if (ring->space >= n) {
+ return 0;
+ }
+
+ if (dev->primary->master) {
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ }
+
+ msleep(1);
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+ } while (!time_after(jiffies, end));
+ return -EBUSY;
+}
+
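+/*
+ * Pad the space between the current tail and the end of the buffer with
+ * MI_NOOPs (waiting for that much free space first), then wrap the tail
+ * back to the start of the ring.
+ */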
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+ unsigned int *virt;
+ int rem = ring->size - ring->tail;
+
+ if (ring->space < rem) {
+ int ret = ring_wait_for_space(ring, rem);
+ if (ret)
+ return ret;
+ }
+
+ virt = (unsigned int *)(uintptr_t)((caddr_t)ring->virtual_start + ring->tail);
+ rem = (int)(rem / 4);
+ while (rem--) {
+ *virt++ = MI_NOOP;
+ }
+
+ ring->tail = 0;
+ ring->space = ring_space(ring);
+
+ return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *idle_req;
+ u32 seqno;
+ int ret;
+
+ /* We need to add any requests required to flush the objects and ring */
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring, NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait upon the last request to be completed */
+ if (list_empty(&ring->request_list))
+ return 0;
+
+ idle_req = list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request,
+ list);
+ seqno = idle_req->seqno;
+
+ return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+ int bytes)
+{
+ int ret;
+
+ if (ring->tail + bytes > ring->effective_size) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (ret)
+ return ret;
+ }
+
+ if (ring->space < bytes) {
+ ret = ring_wait_for_space(ring, bytes);
+ if (ret)
+ return ret;
+ }
+
+ ring->space -= bytes;
+ return 0;
+}
+
+int intel_ring_begin(struct intel_ring_buffer *ring,
+ int num_dwords)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ int ret;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
+ /* Preallocate the olr before touching the ring */
+ ret = intel_ring_alloc_seqno(ring);
+ if (ret)
+ return ret;
+
+ return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
+
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ BUG_ON(ring->outstanding_lazy_request);
+
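+ /* Presumably the sync registers hold the last signalled seqno; clear
+ * them so stale pre-reset values cannot satisfy a later semaphore
+ * wait. */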
+ if (INTEL_INFO(ring->dev)->gen >= 6) {
+ I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+ }
+
+ ring->set_seqno(ring, seqno);
+ ring->hangcheck.seqno = seqno;
+}
+
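+/*
+ * Fold the tail offset back into the ring and publish it to the
+ * hardware, unless error handling has marked this ring as stopped.
+ */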
+void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ ring->tail &= ring->size - 1;
+ if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+ return;
+ ring->write_tail(ring, ring->tail);
+}
+
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+ u32 value)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+
+ /* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ /* Clear the context id. Here be magic! */
+ I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
+ if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+ GEN6_BSD_SLEEP_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+ /* Now that the ring is fully powered up, update the tail */
+ I915_WRITE_TAIL(ring, value);
+ POSTING_READ(RING_TAIL(ring->mmio_base));
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
+ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+}
+
+static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate, u32 flush)
+{
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.5 - video engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+ MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ return 0;
+}
+
+static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len,
+ unsigned flags)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+ /* bit0-7 is the length on GEN6+ */
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+/* Blitter support (SandyBridge+) */
+
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate, u32 flush)
+{
+ struct drm_device *dev = ring->dev;
+ uint32_t cmd;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ cmd = MI_FLUSH_DW;
+ /*
+ * Bspec vol 1c.3 - blitter engine command streamer:
+ * "If ENABLED, all TLBs will be invalidated once the flush
+ * operation is complete. This bit is only valid when the
+ * Post-Sync Operation field is a value of 1h or 3h."
+ */
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_DW_OP_STOREDW;
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ if (IS_GEN7(dev) && flush)
+ return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+
+ return 0;
+}
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->flush = gen7_render_ring_flush;
+ if (INTEL_INFO(dev)->gen == 6)
+ ring->flush = gen6_render_ring_flush;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
+ ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ ring->signal_mbox[RCS] = GEN6_NOSYNC;
+ ring->signal_mbox[VCS] = GEN6_VRSYNC;
+ ring->signal_mbox[BCS] = GEN6_BRSYNC;
+ ring->signal_mbox[VECS] = GEN6_VERSYNC;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = pc_render_get_seqno;
+ ring->set_seqno = pc_render_set_seqno;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+ GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
+ } else {
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ }
+ ring->write_tail = ring_write_tail;
+ if (IS_HASWELL(dev))
+ ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
+ /* Workaround batchbuffer to combat CS tlb bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to ping batch bo\n");
+ return ret;
+ }
+
+ ring->private = obj;
+ }
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* non-kms not supported on gen6+ */
+ return -ENODEV;
+ }
+
+ /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+ * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+ * the special gen5 functions. */
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+
+ ring->size = size;
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ring->effective_size -= 128;
+
+ ring->map.offset = start;
+ ring->map.size = size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap(&ring->map, dev);
+ if (ring->map.handle == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ ring->virtual_start = (void *)ring->map.handle;
+
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = init_phys_status_page(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+
+ ring->name = "bsd ring";
+ ring->id = VCS;
+
+ ring->write_tail = ring_write_tail;
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ /* gen6 bsd needs a special wa for tail updates */
+ if (IS_GEN6(dev))
+ ring->write_tail = gen6_bsd_ring_write_tail;
+ ring->flush = gen6_bsd_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
+ ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ ring->signal_mbox[RCS] = GEN6_RVSYNC;
+ ring->signal_mbox[VCS] = GEN6_NOSYNC;
+ ring->signal_mbox[BCS] = GEN6_BVSYNC;
+ ring->signal_mbox[VECS] = GEN6_VEVSYNC;
+ } else {
+ ring->mmio_base = BSD_RING_BASE;
+ ring->flush = bsd_ring_flush;
+ ring->add_request = i9xx_add_request;
+ ring->get_seqno = ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (IS_GEN5(dev)) {
+ ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ } else {
+ ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ }
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+
+ ring->name = "blitter ring";
+ ring->id = BCS;
+
+ ring->mmio_base = BLT_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ ring->signal_mbox[RCS] = GEN6_RBSYNC;
+ ring->signal_mbox[VCS] = GEN6_VBSYNC;
+ ring->signal_mbox[BCS] = GEN6_NOSYNC;
+ ring->signal_mbox[VECS] = GEN6_VEBSYNC;
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_vebox_ring_buffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
+
+ ring->name = "video enhancement ring";
+ ring->id = VECS;
+
+ ring->mmio_base = VEBOX_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
+ PM_VEBOX_CS_ERROR_INTERRUPT;
+ ring->irq_get = hsw_vebox_get_irq;
+ ring->irq_put = hsw_vebox_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
+ ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->signal_mbox[RCS] = GEN6_RVESYNC;
+ ring->signal_mbox[VCS] = GEN6_VVESYNC;
+ ring->signal_mbox[BCS] = GEN6_BVESYNC;
+ ring->signal_mbox[VECS] = GEN6_NOSYNC;
+ ring->init = init_ring_common;
+
+ return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+ int ret;
+
+ if (!ring->gpu_caches_dirty)
+ return 0;
+
+ ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+ uint32_t flush_domains;
+ int ret;
+
+ flush_domains = 0;
+ if (ring->gpu_caches_dirty)
+ flush_domains = I915_GEM_GPU_DOMAINS;
+
+ ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
+ return 0;
+}
diff --git a/usr/src/uts/intel/io/i915/intel_ringbuffer.h b/usr/src/uts/intel/io/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..1970a54
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_ringbuffer.h
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2008-2010, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Zou Nan hai <nanhai.zou@intel.com>
+ * Xiang Hai hao<haihao.xiang@intel.com>
+ *
+ */
+
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
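+
+/*
+ * A minimal sketch of how this margin is typically applied when
+ * computing free space (the actual logic lives in intel_ringbuffer.c):
+ *
+ *     space = head - (tail + I915_RING_FREE_SPACE);
+ *     if (space < 0)
+ *         space += size;
+ *
+ * Reserving these bytes keeps the tail from ever fully catching up to
+ * the head, so the hardware constraint quoted above cannot be violated.
+ */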
+
+struct intel_hw_status_page {
+ void *page_addr;
+ unsigned int gfx_addr;
+ struct drm_i915_gem_object *obj;
+};
+
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+
+#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
+
+enum intel_ring_hangcheck_action { wait, active, kick, hung };
+
+struct intel_ring_hangcheck {
+ bool deadlock;
+ u32 seqno;
+ u32 acthd;
+ int score;
+ enum intel_ring_hangcheck_action action;
+};
+
+struct intel_ring_buffer {
+ const char *name;
+ enum intel_ring_id {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ VECS,
+ } id;
+#define I915_NUM_RINGS 4
+ u32 mmio_base;
+ void *virtual_start;
+ struct drm_device *dev;
+ struct drm_i915_gem_object *obj;
+
+ u32 head;
+ u32 tail;
+ int space;
+ int size;
+ int effective_size;
+ struct intel_hw_status_page status_page;
+
+ /** We track the position of the requests in the ring buffer, and
+ * when each is retired we increment last_retired_head as the GPU
+ * must have finished processing the request and so we know we
+ * can advance the ringbuffer up to that position.
+ *
+ * last_retired_head is set to -1 after the value is consumed so
+ * we can detect new retirements.
+ */
+ u32 last_retired_head;
+
+ struct {
+ u32 gt; /* protected by dev_priv->irq_lock */
+ u32 pm; /* protected by dev_priv->rps.lock (sucks) */
+ } irq_refcount;
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ u32 trace_irq_seqno;
+ u32 sync_seqno[I915_NUM_RINGS-1];
+ bool (*irq_get)(struct intel_ring_buffer *ring);
+ void (*irq_put)(struct intel_ring_buffer *ring);
+
+ int (*init)(struct intel_ring_buffer *ring);
+
+ void (*write_tail)(struct intel_ring_buffer *ring,
+ u32 value);
+ int (*flush)(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ int (*add_request)(struct intel_ring_buffer *ring);
+ /* Some chipsets are not quite as coherent as advertised and need
+ * an expensive kick to force a true read of the up-to-date seqno.
+ * However, the up-to-date seqno is not always required and the last
+ * seen value is good enough. Note that the seqno will always be
+ * monotonic, even if not coherent.
+ */
+ u32 (*get_seqno)(struct intel_ring_buffer *ring,
+ bool lazy_coherency);
+ void (*set_seqno)(struct intel_ring_buffer *ring,
+ u32 seqno);
+ int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ u32 offset, u32 length,
+ unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+ void (*cleanup)(struct intel_ring_buffer *ring);
+ int (*sync_to)(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
+
+ /* our mbox written by others */
+ u32 semaphore_register[I915_NUM_RINGS];
+ /* mboxes this ring signals to */
+ u32 signal_mbox[I915_NUM_RINGS];
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * Do we have some not yet emitted requests outstanding?
+ */
+ u32 outstanding_lazy_request;
+ bool gpu_caches_dirty;
+ bool fbc_dirty;
+
+ wait_queue_head_t irq_queue;
+ drm_local_map_t map;
+
+ /**
+ * Do an explicit TLB flush before MI_SET_CONTEXT
+ */
+ bool itlb_before_ctx_switch;
+ struct i915_hw_context *default_context;
+ struct i915_hw_context *last_context;
+
+ struct intel_ring_hangcheck hangcheck;
+
+ void *private;
+};
+
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+ return ring->obj != NULL;
+}
+
+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+ return 1 << ring->id;
+}
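+
+/* E.g. intel_ring_flag(): RCS -> 0x1, VCS -> 0x2, BCS -> 0x4, VECS -> 0x8. */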
+
+static inline u32
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *other)
+{
+ int idx;
+
+ /*
+ * The sync index is the offset of 'other' relative to this ring,
+ * minus one, modulo I915_NUM_RINGS:
+ * rcs  -> 0 = vcs,  1 = bcs,  2 = vecs;
+ * vcs  -> 0 = bcs,  1 = vecs, 2 = rcs;
+ * bcs  -> 0 = vecs, 1 = rcs,  2 = vcs;
+ * vecs -> 0 = rcs,  1 = vcs,  2 = bcs.
+ */
+
+ idx = (other - ring) - 1;
+ if (idx < 0)
+ idx += I915_NUM_RINGS;
+
+ return idx;
+}
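+
+/*
+ * Worked example, assuming both rings live in the dev_priv->ring[]
+ * array so that pointer subtraction yields the id difference: for
+ * ring == VCS (1) and other == RCS (0), idx = (0 - 1) - 1 = -2, which
+ * becomes 2 after adding I915_NUM_RINGS, matching the vcs line above.
+ */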
+
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+ int reg)
+{
+ u32 *regs = ring->status_page.page_addr;
+ return regs[reg];
+}
+
+static inline void
+intel_write_status_page(struct intel_ring_buffer *ring,
+ int reg, u32 value)
+{
+ u32 *regs = ring->status_page.page_addr;
+ regs[reg] = value;
+}
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_GEM_HWS_SCRATCH_INDEX 0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
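+
+/*
+ * Sketch of the conventional use of I915_GEM_HWS_INDEX (how the
+ * add_request hooks typically publish a seqno; the exact command
+ * sequence varies per generation):
+ *
+ *     intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ *     intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ *     intel_ring_emit(ring, seqno);
+ *     intel_ring_emit(ring, MI_USER_INTERRUPT);
+ *
+ * The seqno can then be read back cheaply via intel_read_status_page().
+ */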
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int intel_wait_ring_idle(struct intel_ring_buffer *ring);
+
+int intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ u32 data)
+{
+ unsigned int *virt = (unsigned int *)((intptr_t)ring->virtual_start + ring->tail);
+ *virt = data;
+ ring->tail += 4;
+}
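+
+/*
+ * Typical emit sequence (a sketch; callers must check the return value
+ * of intel_ring_begin(), which reserves space and may wait):
+ *
+ *     ret = intel_ring_begin(ring, 2);
+ *     if (ret)
+ *         return ret;
+ *     intel_ring_emit(ring, MI_NOOP);
+ *     intel_ring_emit(ring, MI_NOOP);
+ *     intel_ring_advance(ring);
+ */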
+
+void intel_ring_advance(struct intel_ring_buffer *ring);
+int intel_ring_idle(struct intel_ring_buffer *ring);
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
+
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+int intel_init_vebox_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+
+static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+{
+ return ring->tail;
+}
+
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ BUG_ON(ring->outstanding_lazy_request == 0);
+ return ring->outstanding_lazy_request;
+}
+
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+ if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
+}
+
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/usr/src/uts/intel/io/i915/intel_sdvo.c b/usr/src/uts/intel/io/i915/intel_sdvo.c
new file mode 100644
index 0000000..ba51b9a
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_sdvo.c
@@ -0,0 +1,2961 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_sun_i2c.h" /* OSOL_i915 */
+#include "intel_drv.h"
+#include "drm_edid.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_sdvo_regs.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+ "NTSC_M" , "NTSC_J" , "NTSC_443",
+ "PAL_B" , "PAL_D" , "PAL_G" ,
+ "PAL_H" , "PAL_I" , "PAL_M" ,
+ "PAL_N" , "PAL_NC" , "PAL_60" ,
+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
+ "SECAM_60"
+};
+
+#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
+struct intel_sdvo {
+ struct intel_encoder base;
+
+ struct i2c_adapter *i2c;
+ u8 slave_addr;
+
+ struct i2c_adapter ddc;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ uint32_t sdvo_reg;
+
+ /* Active outputs controlled by this SDVO output */
+ uint16_t controlled_output;
+
+ /*
+ * Capabilities of the SDVO device returned by
+ * i830_sdvo_get_capabilities()
+ */
+ struct intel_sdvo_caps caps;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
+ int pixel_clock_min, pixel_clock_max;
+
+ /*
+ * For a multi-function SDVO device, this tracks the
+ * currently attached outputs.
+ */
+ uint16_t attached_output;
+
+ /*
+ * Hotplug activation bits for this device
+ */
+ uint16_t hotplug_active;
+
+ /**
+ * This is used to select the color range of RGB outputs in HDMI mode.
+ * It is only valid when using TMDS encoding and 8 bit per color mode.
+ */
+ uint32_t color_range;
+ bool color_range_auto;
+
+ /**
+ * This is set if we're going to treat the device as TV-out.
+ *
+ * While we have these nice friendly flags for output types that ought
+ * to decide this for us, the S-Video output on our HDMI+S-Video card
+ * shows up as RGB1 (VGA).
+ */
+ bool is_tv;
+
+ /* On different gens SDVOB is at different places. */
+ bool is_sdvob;
+
+ /* Index of the current TV format name */
+ int tv_format_index;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+ bool rgb_quant_range_selectable;
+
+ /**
+ * This is set if we detect the output of the SDVO device as LVDS and
+ * have a valid fixed mode to use with the panel.
+ */
+ bool is_lvds;
+
+ /**
+ * Pointer to the SDVO LVDS fixed panel mode
+ */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
+
+ /*
+ * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
+ */
+ uint8_t dtd_sdvo_flags;
+};
+
+struct intel_sdvo_connector {
+ struct intel_connector base;
+
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ enum hdmi_force_audio force_audio;
+
+ /* This contains all currently supported TV formats */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
+ /* add the property for the SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
+
+ /* add the property for the SDVO-TV/LVDS */
+ struct drm_property *brightness;
+
+ /* Variables recording the current settings for the above properties */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+
+ /* Ranges for the margins. */
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
+};
+
+static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_sdvo, base.base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_sdvo, base);
+}
+
+static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 bval = val, cval = val;
+ int i;
+
+ if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(intel_sdvo->sdvo_reg, val);
+ I915_READ(intel_sdvo->sdvo_reg);
+ return;
+ }
+
+ if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
+ cval = I915_READ(GEN3_SDVOC);
+ else
+ bval = I915_READ(GEN3_SDVOB);
+
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+ for (i = 0; i < 2; i++)
+ {
+ I915_WRITE(GEN3_SDVOB, bval);
+ I915_READ(GEN3_SDVOB);
+ I915_WRITE(GEN3_SDVOC, cval);
+ I915_READ(GEN3_SDVOC);
+ }
+}
+
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = intel_sdvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = intel_sdvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ch,
+ }
+ };
+ int ret;
+
+ if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
+ return true;
+
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+ u8 cmd;
+ const char *name;
+} sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define SDVO_NAME(sdvo) ((sdvo)->is_sdvob ? "SDVOB" : "SDVOC")
+
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s: W: %02X ",
+ SDVO_NAME(intel_sdvo), cmd);
+ for (i = 0; i < args_len; i++)
+ DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+ for (; i < 8; i++)
+ DRM_LOG_KMS(" ");
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd) {
+ DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(sdvo_cmd_names))
+ DRM_LOG_KMS("(%02X)", cmd);
+ DRM_LOG_KMS("\n");
+}
+
+static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+ "Not supported",
+ "Invalid arg",
+ "Pending",
+ "Target not specified",
+ "Scaling not supported"
+};
+
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ u8 *buf, status;
+ struct i2c_msg *msgs;
+ int i, ret = true;
+
+ /* Would it be simpler to allocate both in one go? */
+ buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
+ if (!buf)
+ return false;
+
+ msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+ if (!msgs) {
+ kfree(buf, args_len * 2 + 2);
+ return false;
+ }
+
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
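+ /*
+ * Message layout (a sketch of what the loop below builds): args_len
+ * two-byte register writes placing each argument byte at register
+ * SDVO_I2C_ARG_0 - i, one two-byte write of the opcode into
+ * SDVO_I2C_OPCODE, then a write of SDVO_I2C_CMD_STATUS followed by a
+ * one-byte read to fetch the response status.
+ */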
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2 *i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ ret = false;
+ goto out;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ ret = false;
+ }
+
+out:
+ kfree(msgs, (args_len + 3) * sizeof(*msgs));
+ kfree(buf, args_len * 2 + 2);
+ return ret;
+}
+
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ void *response, int response_len)
+{
+ u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
+ u8 status;
+ int i;
+
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ *
+ * Also beware that the first response by many devices is to
+ * reply PENDING and stall for time. TVs are notorious for
+ * requiring longer than specified to complete their replies.
+ * Originally (in the DDX, long ago) the delay was only ever 15ms,
+ * with an additional 30ms delay for TVs added later after many
+ * experiments. To accommodate both sets of delays, we fall back to
+ * a sequence of slow checks if the device fails to reply within
+ * 5*15µs.
+ */
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+ if (retry < 10)
+ msleep(15);
+ else
+ udelay(15);
+
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+ }
+
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_LOG_KMS("(??? %d)", status);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ }
+ DRM_LOG_KMS("\n");
+ return true;
+
+log_fail:
+ DRM_LOG_KMS("... failed\n");
+ return false;
+}
+
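+/*
+ * The SDVO link can only run within a fixed clock window (the
+ * thresholds below imply roughly 100-200 MHz), so slower modes are sent
+ * with a 2x or 4x pixel multiplier and the encoder factors it back out;
+ * see intel_sdvo_compute_config().
+ */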
+static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+ if (mode->clock >= 100000)
+ return 1;
+ else if (mode->clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
+
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
+{
+ /* This must be the immediately preceding write before the i2c xfer */
+ return intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
+}
+
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
+
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, value, len);
+}
+
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_set_target_input_args targets = {0};
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
+{
+ struct intel_sdvo_get_trained_inputs_response response;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
+{
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 *outputs)
+{
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ACTIVE_OUTPUTS,
+ outputs, sizeof(*outputs));
+}
+
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
+ int mode)
+{
+ u8 state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
+ int *clock_min,
+ int *clock_max)
+{
+ struct intel_sdvo_pixel_clock_range clocks;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+ return true;
+}
+
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
+{
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
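+
+/*
+ * Both helpers above rely on each *_TIMINGS_PART2 opcode numerically
+ * following its *_TIMINGS_PART1 counterpart; that is why they take only
+ * the PART1 command code and issue cmd + 1 for the second half.
+ */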
+
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_sdvo,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_sdvo,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_get_timing(intel_sdvo,
+ SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ uint16_t clock,
+ uint16_t width,
+ uint16_t height)
+{
+ struct intel_sdvo_preferred_input_timing_args args;
+
+ (void) memset(&args, 0, sizeof(args));
+ args.clock = clock;
+ args.width = width;
+ args.height = height;
+ args.interlace = 0;
+
+ if (intel_sdvo->is_lvds &&
+ (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ args.scaled = 1;
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+}
+
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
+static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ const struct drm_display_mode *mode)
+{
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
+ int mode_clock;
+
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->htotal - mode->hdisplay;
+ h_sync_len = mode->hsync_end - mode->hsync_start;
+
+ v_blank_len = mode->vtotal - mode->vdisplay;
+ v_sync_len = mode->vsync_end - mode->vsync_start;
+
+ h_sync_offset = mode->hsync_start - mode->hdisplay;
+ v_sync_offset = mode->vsync_start - mode->vdisplay;
+
+ mode_clock = mode->clock;
+ mode_clock /= 10;
+ dtd->part1.clock = (u16) mode_clock;
+
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ dtd->part2.h_sync_off = h_sync_offset & 0xff;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
+ dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+ dtd->part2.reserved = 0;
+}
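+
+/*
+ * Worked example of the packing above, using the standard 1024x768
+ * timing (hdisplay 1024, htotal 1344, clock 65000 kHz): h_blank_len is
+ * 320, so part1.clock = 6500, part1.h_active = 1024 & 0xff = 0x00, and
+ * part1.h_high = ((1024 >> 8) << 4) | (320 >> 8) = 0x41.
+ */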
+
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ const struct intel_sdvo_dtd *dtd)
+{
+ mode->hdisplay = dtd->part1.h_active;
+ mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+ mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+ mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+ mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode->vdisplay = dtd->part1.v_active;
+ mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode->vsync_start = mode->vdisplay;
+ mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode->vsync_end = mode->vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+ mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_encode encode;
+
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
+
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+ uint8_t mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
+ uint8_t mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+{
+ int i, j;
+ uint8_t set_buf_index[2];
+ uint8_t av_split;
+ uint8_t buf_size;
+ uint8_t buf[48];
+ uint8_t *pos;
+
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+ for (i = 0; i <= av_split; i++) {
+ set_buf_index[0] = i; set_buf_index[1] = 0;
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &buf_size, 1);
+
+ pos = buf;
+ for (j = 0; j <= buf_size; j += 8) {
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA,
+ NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, pos, 8);
+ pos += 8;
+ }
+ }
+ }
+}
+#endif
+
+static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned if_index, uint8_t tx_rate,
+ uint8_t *data, unsigned length)
+{
+ uint8_t set_buf_index[2] = {(uint8_t) if_index, 0 };
+ uint8_t hbuf_size, tmp[8];
+ int i;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
+
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ (void) memset(tmp, 0, 8);
+ if (i < length)
+ (void) memcpy(tmp, data + i, min(8, length - i));
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ tmp, 8))
+ return false;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+}
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
+
+ if (intel_sdvo->rgb_quant_range_selectable) {
+ if (intel_crtc->config.limited_color_range)
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ else
+ avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ }
+
+ avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
+
+ intel_dip_infoframe_csum(&avi_if);
+
+ /* sdvo spec says that the ecc is handled by the hw, and it looks like
+ * we must not send the ecc field, either. */
+ (void) memcpy(sdvo_data, &avi_if, 3);
+ sdvo_data[3] = avi_if.checksum;
+ (void) memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
+ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ SDVO_HBUF_TX_VSYNC,
+ sdvo_data, sizeof(sdvo_data));
+}
+
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_tv_format format;
+ uint32_t format_map;
+
+ format_map = 1 << intel_sdvo->tv_format_index;
+ memset(&format, 0, sizeof(format));
+ (void) memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
+}
+
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode)
+{
+ struct intel_sdvo_dtd output_dtd;
+
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return false;
+
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ return false;
+
+ return true;
+}
+
+/* Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that. */
+static bool
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo_dtd input_dtd;
+
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return false;
+
+ if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
+
+ if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+ &input_dtd))
+ return false;
+
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
+
+ return true;
+}
+
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
+{
+ unsigned dotclock = pipe_config->adjusted_mode.clock;
+ struct dpll *clock = &pipe_config->dpll;
+
+ /* SDVO TV has fixed PLL values that depend on its clock range;
+ * this mirrors the VBIOS setting. */
+ if (dotclock >= 100000 && dotclock < 140500) {
+ clock->p1 = 2;
+ clock->p2 = 10;
+ clock->n = 3;
+ clock->m1 = 16;
+ clock->m2 = 8;
+ } else if (dotclock >= 140500 && dotclock <= 200000) {
+ clock->p1 = 1;
+ clock->p2 = 10;
+ clock->n = 6;
+ clock->m1 = 12;
+ clock->m2 = 8;
+ } else {
+ DRM_ERROR("SDVO TV clock out of range: %i\n", dotclock);
+ }
+
+ pipe_config->clock_set = true;
+}
+
+static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->requested_mode;
+
+ DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+ pipe_config->pipe_bpp = 8*3;
+
+ if (HAS_PCH_SPLIT(encoder->base.dev))
+ pipe_config->has_pch_encoder = true;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (intel_sdvo->is_tv) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+ return false;
+
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ pipe_config->sdvo_tv_clock = true;
+ } else if (intel_sdvo->is_lvds) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+ intel_sdvo->sdvo_lvds_fixed_mode))
+ return false;
+
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ pipe_config->pixel_multiplier =
+ intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ adjusted_mode->clock *= pipe_config->pixel_multiplier;
+
+ if (intel_sdvo->color_range_auto) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ /* FIXME: This bit is only valid when using TMDS encoding and 8
+ * bit per color mode. */
+ if (intel_sdvo->has_hdmi_monitor &&
+ drm_match_cea_mode(adjusted_mode) > 1)
+ intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
+ else
+ intel_sdvo->color_range = 0;
+ }
+
+ if (intel_sdvo->color_range)
+ pipe_config->limited_color_range = true;
+
+ /* Clock computation needs to happen after pixel multiplier. */
+ if (intel_sdvo->is_tv)
+ i9xx_adjust_sdvo_tv_clock(pipe_config);
+
+ return true;
+}
+
+static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = intel_encoder->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
+ u32 sdvox;
+ struct intel_sdvo_in_out_map in_out;
+ struct intel_sdvo_dtd input_dtd, output_dtd;
+ int rate;
+
+ if (!mode)
+ return;
+
+ /* First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = intel_sdvo->attached_output;
+ in_out.in1 = 0;
+
+ intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+ /* Set the output timings to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
+
+ /* lvds has a special fixed output timing. */
+ if (intel_sdvo->is_lvds)
+ intel_sdvo_get_dtd_from_mode(&output_dtd,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+ else
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ DRM_INFO("Setting output timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return;
+
+ if (intel_sdvo->has_hdmi_monitor) {
+ (void) intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ (void) intel_sdvo_set_colorimetry(intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ (void) intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+ } else
+ (void) intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
+
+ if (intel_sdvo->is_tv &&
+ !intel_sdvo_set_tv_format(intel_sdvo))
+ return;
+
+ /* We have tried to get input timing in mode_fixup, and filled into
+ * adjusted_mode.
+ */
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
+ if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+ DRM_INFO("Setting input timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
+
+ switch (intel_crtc->config.pixel_multiplier) {
+ default:
+ DRM_ERROR("unknown pixel mutlipler specified\n");
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ }
+ if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+ return;
+
+ /* Set the SDVO control regs. */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* The real mode polarity is set by the SDVO commands, using
+ * struct intel_sdvo_dtd. */
+ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+ if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
+ sdvox |= intel_sdvo->color_range;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
+ } else {
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ switch (intel_sdvo->sdvo_reg) {
+ case GEN3_SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case GEN3_SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+ }
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ else
+ sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+ if (intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+ /*LINTED E_NOP_IF_STMT*/
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* done in crtc_mode_set as the dpll_md reg must be written early */
+ /*LINTED E_NOP_IF_STMT*/
+ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ /* done in crtc_mode_set as it lives inside the dpll register */
+ } else {
+ sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+ << SDVO_PORT_MULTIPLY_SHIFT;
+ }
+
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_STALL_SELECT;
+ intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+}
+
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+ u16 active_outputs = 0;
+
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ if (active_outputs & intel_sdvo_connector->output_flag)
+ return true;
+ else
+ return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u16 active_outputs = 0;
+ u32 tmp;
+
+ tmp = I915_READ(intel_sdvo->sdvo_reg);
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
+ return false;
+
+ if (HAS_PCH_CPT(dev))
+ *pipe = PORT_TO_PIPE_CPT(tmp);
+ else
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void intel_sdvo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo_dtd dtd;
+ int encoder_pixel_multiplier = 0;
+ u32 flags = 0, sdvox;
+ u8 val;
+ bool ret;
+
+ ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
+ if (!ret) {
+ /* Some sdvo encoders are not spec compliant and don't
+ * implement the mandatory get_timings function. */
+ DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
+ pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
+ } else {
+ if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ }
+
+ pipe_config->adjusted_mode.flags |= flags;
+
+ /*
+ * Pixel multiplier readout is tricky: only on i915g/gm is it stored in
+ * the sdvo port register, on all other platforms it is part of the dpll
+ * state. Since the general pipe state readout happens before
+ * encoder->get_config is called, we already have a valid pixel
+ * multiplier on all other platforms.
+ */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ pipe_config->pixel_multiplier =
+ ((sdvox & SDVO_PORT_MULTIPLY_MASK)
+ >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
+ }
+
+ /* Cross check the port pixel multiplier with the sdvo encoder state. */
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
+ switch (val) {
+ case SDVO_CLOCK_RATE_MULT_1X:
+ encoder_pixel_multiplier = 1;
+ break;
+ case SDVO_CLOCK_RATE_MULT_2X:
+ encoder_pixel_multiplier = 2;
+ break;
+ case SDVO_CLOCK_RATE_MULT_4X:
+ encoder_pixel_multiplier = 4;
+ break;
+ }
+
+ if (HAS_PCH_SPLIT(dev))
+ return; /* no pixel multiplier readout support yet */
+
+ if (encoder_pixel_multiplier != pipe_config->pixel_multiplier)
+ DRM_ERROR("SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+}
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u32 temp;
+
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_OFF);
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ /* HW workaround for IBX, we need to move the port to
+ * transcoder A before disabling it. */
+ if (HAS_PCH_IBX(encoder->base.dev)) {
+ struct drm_crtc *crtc = encoder->base.crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_sdvo->sdvo_reg, temp);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+
+ /* Transcoder selection bits only update
+ * effectively on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(encoder->base.dev, pipe);
+ else
+ msleep(50);
+ }
+ }
+
+ intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ u32 temp;
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = I915_READ(intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0) {
+ /* HW workaround for IBX, we need to move the port
+ * to transcoder A before disabling it, so restore it here. */
+ if (HAS_PCH_IBX(dev))
+ temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+ intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ }
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but given that
+ * the status reports success, assume we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
+ }
+
+ if (0)
+ (void) intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_ON);
+ (void) intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_crtc *crtc;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ /* SDVO supports only 2 DPMS states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
+ if (mode == connector->dpms)
+ return;
+
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+ crtc = intel_sdvo->base.base.crtc;
+ if (!crtc) {
+ intel_sdvo->base.connectors_active = false;
+ return;
+ }
+
+ /* We set active outputs manually below in case pipe dpms doesn't change
+ * due to cloning. */
+ if (mode != DRM_MODE_DPMS_ON) {
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+
+ intel_sdvo->base.connectors_active = false;
+
+ intel_crtc_update_dpms(crtc);
+ } else {
+ intel_sdvo->base.connectors_active = true;
+
+ intel_crtc_update_dpms(crtc);
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+ }
+
+ intel_modeset_check_state(connector->dev);
+}
+
+static int intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (intel_sdvo->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+ if (intel_sdvo->pixel_clock_max < mode->clock)
+ return MODE_CLOCK_HIGH;
+
+ if (intel_sdvo->is_lvds) {
+ if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return MODE_OK;
+}
+
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+{
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
+}
+
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ uint16_t hotplug;
+
+ /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+ * on the line. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ return 0;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &hotplug, sizeof(hotplug)))
+ return 0;
+
+ return hotplug;
+}
+
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
+}
+
+static bool
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+{
+ /* Is there more than one type of output? */
+ return hweight16(intel_sdvo->caps.output_flags) > 1;
+}
+
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+ return drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ dev_priv->vbt.crt_ddc_pin));
+}
+
+static enum drm_connector_status
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
+
+ edid = intel_sdvo_get_edid(connector);
+
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
+		/*
+		 * Don't pass 1 as the argument of the DDC bus switch when
+		 * fetching the EDID; bus 1 is reserved for the SDVO SPD ROM.
+		 */
+ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ intel_sdvo->ddc_bus = ddc;
+ edid = intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
+ }
+		/*
+		 * If no EDID was found on any other bus, restore the original
+		 * DDC bus; otherwise keep the bus on which the EDID was found.
+		 */
+ if (edid == NULL)
+ intel_sdvo->ddc_bus = saved_ddc;
+ }
+
+	/*
+	 * If there is still no EDID and no monitor is connected to the VGA
+	 * port, try the CRT DDC to read the EDID for the DVI connector.
+	 */
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ intel_sdvo->rgb_quant_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
+ }
+ } else
+ status = connector_status_disconnected;
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ }
+
+ if (status == connector_status_connected) {
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
+ intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
+ }
+
+ return status;
+}
+
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ uint16_t response;
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS,
+ &response, 2))
+ return connector_status_unknown;
+
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+
+ intel_sdvo->attached_output = response;
+
+ intel_sdvo->has_hdmi_monitor = false;
+ intel_sdvo->has_hdmi_audio = false;
+ intel_sdvo->rgb_quant_range_selectable = false;
+
+ if ((intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(intel_sdvo_connector))
+ ret = intel_sdvo_tmds_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+		/* If we have an EDID, check that it matches the connector type. */
+ edid = intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ } else
+ ret = connector_status_connected;
+ }
+
+	/* May need to update encoder flags, e.g. the clock, for SDVO TV, etc. */
+ if (ret == connector_status_connected) {
+ intel_sdvo->is_tv = false;
+ intel_sdvo->is_lvds = false;
+
+ if (response & SDVO_TV_MASK)
+ intel_sdvo->is_tv = true;
+ if (response & SDVO_LVDS_MASK)
+ intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ }
+
+ return ret;
+}
+
+static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+
+ /* set the bus switch and get the modes */
+ edid = intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
+ (void) drm_mode_connector_update_edid_property(connector, edid);
+ (void) drm_add_edid_modes(connector, edid);
+ }
+
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+ 416, 0, 200, 201, 232, 233, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+ 416, 0, 240, 241, 272, 273, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+ 496, 0, 300, 301, 332, 333, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+ 736, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+ 736, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+ 736, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+ 800, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+ 800, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+ 816, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+ 816, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+ 816, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+ 816, 0, 540, 541, 572, 573, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+ 816, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+ 864, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+ 896, 0, 600, 601, 632, 633, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+ 928, 0, 624, 625, 656, 657, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+ 1016, 0, 766, 767, 798, 799, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+ 1120, 0, 768, 769, 800, 801, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+ 1376, 0, 1024, 1025, 1056, 1057, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_sdtv_resolution_request tv_res;
+ uint32_t reply = 0, format_map = 0;
+ int i;
+
+ /* Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ format_map = 1 << intel_sdvo->tv_format_index;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+
+ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+ return;
+
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
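+	/* The reply is 3 bytes of resolution bit flags; see
+	 * struct intel_sdvo_sdtv_resolution_reply. */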
+ if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ if (reply & (1 << i)) {
+ struct drm_display_mode *nmode;
+ nmode = drm_mode_duplicate(connector->dev,
+ &sdvo_tv_modes[i]);
+ if (nmode)
+ drm_mode_probed_add(connector, nmode);
+ }
+}
+
+static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_display_mode *newmode;
+
+ /*
+ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+ * SDVO->LVDS transcoders can't cope with the EDID mode.
+ */
+ if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->vbt.sdvo_lvds_vbt_mode);
+ if (newmode != NULL) {
+ /* Guarantee the mode is preferred */
+ newmode->type = (DRM_MODE_TYPE_PREFERRED |
+ DRM_MODE_TYPE_DRIVER);
+ drm_mode_probed_add(connector, newmode);
+ }
+ }
+
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+ list_for_each_entry(newmode, struct drm_display_mode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
+
+ intel_sdvo->is_lvds = true;
+ break;
+ }
+ }
+}
+
+static int intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+ if (IS_TV(intel_sdvo_connector))
+ intel_sdvo_get_tv_modes(connector);
+ else if (IS_LVDS(intel_sdvo_connector))
+ intel_sdvo_get_lvds_modes(connector);
+ else
+ intel_sdvo_get_ddc_modes(connector);
+
+ return !list_empty(&connector->probed_modes);
+}
+
+static void
+intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (intel_sdvo_connector->left)
+ drm_property_destroy(dev, intel_sdvo_connector->left);
+ if (intel_sdvo_connector->right)
+ drm_property_destroy(dev, intel_sdvo_connector->right);
+ if (intel_sdvo_connector->top)
+ drm_property_destroy(dev, intel_sdvo_connector->top);
+ if (intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, intel_sdvo_connector->bottom);
+ if (intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, intel_sdvo_connector->hpos);
+ if (intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, intel_sdvo_connector->vpos);
+ if (intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, intel_sdvo_connector->saturation);
+ if (intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, intel_sdvo_connector->contrast);
+ if (intel_sdvo_connector->hue)
+ drm_property_destroy(dev, intel_sdvo_connector->hue);
+ if (intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, intel_sdvo_connector->sharpness);
+ if (intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
+ if (intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
+ if (intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
+ if (intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
+ if (intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
+ if (intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
+ if (intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, intel_sdvo_connector->brightness);
+}
+
+static void intel_sdvo_destroy(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+ if (intel_sdvo_connector->tv_format)
+ drm_property_destroy(connector->dev,
+ intel_sdvo_connector->tv_format);
+
+ intel_sdvo_destroy_enhance_property(connector);
+ drm_connector_cleanup(connector);
+ kfree(intel_sdvo_connector, sizeof(struct intel_sdvo_connector));
+}
+
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!intel_sdvo->is_hdmi)
+ return false;
+
+ edid = intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+ if (edid != NULL)
+ kfree(edid, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+
+ return has_audio;
+}
+
+static int
+intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = (int)val;
+ bool has_audio;
+
+ if (i == intel_sdvo_connector->force_audio)
+ return 0;
+
+ intel_sdvo_connector->force_audio = i;
+
+ if (i == HDMI_AUDIO_AUTO)
+ has_audio = intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = (i == HDMI_AUDIO_ON);
+
+ if (has_audio == intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ intel_sdvo->has_hdmi_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ bool old_auto = intel_sdvo->color_range_auto;
+ uint32_t old_range = intel_sdvo->color_range;
+
+ switch (val) {
+ case INTEL_BROADCAST_RGB_AUTO:
+ intel_sdvo->color_range_auto = true;
+ break;
+ case INTEL_BROADCAST_RGB_FULL:
+ intel_sdvo->color_range_auto = false;
+ intel_sdvo->color_range = 0;
+ break;
+ case INTEL_BROADCAST_RGB_LIMITED:
+ intel_sdvo->color_range_auto = false;
+ /* FIXME: this bit is only valid when using TMDS
+ * encoding and 8 bit per color mode. */
+ intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (old_auto == intel_sdvo->color_range_auto &&
+ old_range == intel_sdvo->color_range)
+ return 0;
+
+ goto done;
+ }
+
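+/* Handler for the simple one-value SDVO properties: skip no-op updates,
+ * reject out-of-range values, then jump to set_value with the matching
+ * SDVO command. */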
+#define CHECK_PROPERTY(name, NAME) \
+ if (intel_sdvo_connector->name == property) { \
+ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
+
+ if (property == intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
+
+ if (intel_sdvo->tv_format_index ==
+ intel_sdvo_connector->tv_format_supported[val])
+ return 0;
+
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
+ temp_value = (uint16_t)val;
+ if (intel_sdvo_connector->left == property) {
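+			/* The left and right margins are kept symmetric, so
+			 * mirror the new value into the right property. */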
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->right, val);
+ if (intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (intel_sdvo_connector->right == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->left, val);
+ if (intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->left_margin = temp_value;
+ intel_sdvo_connector->right_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_hscan -
+ intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (intel_sdvo_connector->top == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->bottom, val);
+ if (intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ } else if (intel_sdvo_connector->bottom == property) {
+ drm_object_property_set_value(&connector->base,
+ intel_sdvo_connector->top, val);
+ if (intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ intel_sdvo_connector->top_margin = temp_value;
+ intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = intel_sdvo_connector->max_vscan -
+ intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ }
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+ }
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (intel_sdvo->base.base.crtc)
+ intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
+
+ return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ .dpms = intel_sdvo_dpms,
+ .detect = intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_sdvo_set_property,
+ .destroy = intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+ .get_modes = intel_sdvo_get_modes,
+ .mode_valid = intel_sdvo_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+
+ if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+
+ /* OSOL i2c_del_adapter(&intel_sdvo->ddc); */
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
+ .destroy = intel_sdvo_enc_destroy,
+};
+
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+	switch (sdvo->controlled_output) {
+	case SDVO_OUTPUT_LVDS1:
+		mask |= SDVO_OUTPUT_LVDS1;
+		/* FALLTHROUGH */
+	case SDVO_OUTPUT_LVDS0:
+		mask |= SDVO_OUTPUT_LVDS0;
+		/* FALLTHROUGH */
+	case SDVO_OUTPUT_TMDS1:
+		mask |= SDVO_OUTPUT_TMDS1;
+		/* FALLTHROUGH */
+	case SDVO_OUTPUT_TMDS0:
+		mask |= SDVO_OUTPUT_TMDS0;
+		/* FALLTHROUGH */
+	case SDVO_OUTPUT_RGB1:
+		mask |= SDVO_OUTPUT_RGB1;
+		/* FALLTHROUGH */
+	case SDVO_OUTPUT_RGB0:
+		mask |= SDVO_OUTPUT_RGB0;
+		break;
+	}
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+}
+
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+
+ if (sdvo->is_sdvob)
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
+
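+	/* The high nibble of the VBT ddc_pin encodes the DDC bus number,
+	 * which maps directly onto an SDVO_CONTROL_BUS_DDCx bit. */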
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+ u8 pin;
+
+ if (sdvo->is_sdvob)
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
+
+ if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
+ pin = mapping->i2c_pin;
+ else
+ pin = GMBUS_PORT_DPB;
+
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+ /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ * our code totally fails once we start using gmbus. Hence fall back to
+ * bit banging for now. */
+ intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+ intel_gmbus_force_bit(sdvo->i2c, false);
+}
+
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+{
+ return intel_sdvo_check_supp_encode(intel_sdvo);
+}
+
+static u8
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (sdvo->is_sdvob) {
+ my_mapping = &dev_priv->sdvo_mappings[0];
+ other_mapping = &dev_priv->sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->sdvo_mappings[1];
+ other_mapping = &dev_priv->sdvo_mappings[0];
+ }
+
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /* If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+	/* No SDVO device info was found for either DVO port, so fall back
+	 * to the address assumptions used before BIOS parsing.
+	 */
+ if (sdvo->is_sdvob)
+ return 0x70;
+ else
+ return 0x72;
+}
+
+static void
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ struct intel_sdvo *encoder)
+{
+ (void) drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 1;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
+
+ intel_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *connector)
+{
+ struct drm_device *dev = connector->base.base.dev;
+
+ intel_attach_force_audio_property(&connector->base.base);
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_sdvo->color_range_auto = true;
+ }
+}
+
+static bool
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+ intel_sdvo_connector->output_flag) {
+ intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
+ /* Some SDVO devices have one-shot hotplug interrupts.
+ * Ensure that they get re-enabled when an interrupt happens.
+ */
+ intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+ intel_sdvo_enable_hotplug(intel_encoder);
+ } else {
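+		/* No usable hotplug interrupt; fall back to polling for
+		 * both connect and disconnect. */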
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ intel_sdvo->is_hdmi = true;
+ }
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (intel_sdvo->is_hdmi)
+ intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ intel_sdvo->controlled_output |= type;
+ intel_sdvo_connector->output_flag = (u16)type;
+
+ intel_sdvo->is_tv = true;
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ goto err;
+
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_sdvo_connector_init(intel_sdvo_connector,
+ intel_sdvo);
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+{
+ intel_sdvo->is_tv = false;
+ intel_sdvo->is_lvds = false;
+
+	/* In SDVO, an XXX1 function block may not exist unless the
+	 * corresponding XXX0 block is also present. */
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+ return false;
+
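+	/* Only initialize the second TMDS output when both TMDS bits are
+	 * set, per the XXX0/XXX1 rule above. */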
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_YPRPB0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ unsigned char bytes[2];
+
+ intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(intel_sdvo),
+ bytes[0], bytes[1]);
+ return false;
+ }
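+	/* Any of pipes A, B or C can drive an SDVO encoder. */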
+ intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+ return true;
+}
+
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+
+ list_for_each_entry_safe(connector, tmp, struct drm_connector,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ intel_sdvo_destroy(connector);
+ }
+}
+
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+
+ if (!intel_sdvo_set_target_output(intel_sdvo, type))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
+
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+ if (format_map == 0)
+ return false;
+
+ intel_sdvo_connector->format_supported_num = 0;
+	for (i = 0; i < TV_FORMAT_NUM; i++)
+		if (format_map & (1 << i))
+			intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = (u8)i;
+
+ intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", intel_sdvo_connector->format_supported_num);
+ if (!intel_sdvo_connector->tv_format)
+ return false;
+
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+ drm_property_add_enum(
+ intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+
+ intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+ intel_sdvo_connector->tv_format, 0);
+ return true;
+}
+
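+/* For each enhancement the device reports, read its maximum and current
+ * values and expose them as a DRM range property on the connector. */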
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ intel_sdvo_connector->max_##name = data_value[0]; \
+ intel_sdvo_connector->cur_##name = response; \
+ intel_sdvo_connector->name = \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ if (!intel_sdvo_connector->name) return false; \
+ drm_object_attach_property(&connector->base, \
+ intel_sdvo_connector->name, \
+ intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while (0)
+
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ /* when horizontal overscan is supported, Add the left/right property */
+ if (enhancements.overscan_h) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_hscan = data_value[0];
+ intel_sdvo_connector->left_margin = data_value[0] - response;
+ intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+ intel_sdvo_connector->left =
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->left)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->left,
+ intel_sdvo_connector->left_margin);
+
+ intel_sdvo_connector->right =
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->right)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->right,
+ intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ if (enhancements.overscan_v) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_vscan = data_value[0];
+ intel_sdvo_connector->top_margin = data_value[0] - response;
+ intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+ intel_sdvo_connector->top =
+ drm_property_create_range(dev, 0,
+ "top_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->top)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->top,
+ intel_sdvo_connector->top_margin);
+
+ intel_sdvo_connector->bottom =
+ drm_property_create_range(dev, 0,
+ "bottom_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->bottom)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->bottom,
+ intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ intel_sdvo_connector->max_dot_crawl = 1;
+ intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ intel_sdvo_connector->dot_crawl =
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ if (!intel_sdvo_connector->dot_crawl)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->dot_crawl,
+ intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ }
+
+ return true;
+}
+
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector)
+{
+ union {
+ struct intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+ enhancements.response = 0;
+ (void) intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
+
+ if (IS_TV(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else if(IS_LVDS(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
+
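+/* DDC proxy transfer: first switch the SDVO control bus to the selected
+ * DDC bus, then forward the i2c transaction to the underlying adapter. */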
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+
+ if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+struct i2c_algorithm intel_sdvo_ddc_proxy = {
+ .master_xfer = intel_sdvo_ddc_proxy_xfer,
+ .functionality = intel_sdvo_ddc_proxy_func
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ /* OSOL_i915: sdvo->ddc.owner = THIS_MODULE; */
+ /* OSOL_i915: sdvo->ddc.class = I2C_CLASS_DDC; */
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ /* OSOL_i915: sdvo->ddc.dev.parent = &dev->pdev->dev; */
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+	return true;
+ /* OSOL_i915: return i2c_add_adapter(&sdvo->ddc) == 0; */
+}
+
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
+ int i;
+ intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+ if (!intel_sdvo)
+ return false;
+
+ intel_sdvo->sdvo_reg = sdvo_reg;
+ intel_sdvo->is_sdvob = is_sdvob;
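+	/* The slave address (0x70/0x72) is in 8-bit bus form; shift down
+	 * to the 7-bit i2c address. */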
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
+ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ goto err_i2c_bus;
+
+ /* encoder type will be decided later */
+ intel_encoder = &intel_sdvo->base;
+ intel_encoder->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->type_size = sizeof(struct intel_sdvo);
+ drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
+
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
+ goto err;
+ }
+ }
+
+ intel_encoder->compute_config = intel_sdvo_compute_config;
+ intel_encoder->disable = intel_disable_sdvo;
+ intel_encoder->mode_set = intel_sdvo_mode_set;
+ intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+ intel_encoder->get_config = intel_sdvo_get_config;
+
+ /* In default case sdvo lvds is false */
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err;
+
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
+ }
+
+ /* Only enable the hotplug irq if we need it, to work around noisy
+ * hotplug lines.
+ */
+ if (intel_sdvo->hotplug_active) {
+ intel_encoder->hpd_pin =
+ intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
+ }
+
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = false;
+
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ goto err_output;
+
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+ goto err_output;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(intel_sdvo),
+ intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+ intel_sdvo->caps.device_rev_id,
+ intel_sdvo->pixel_clock_min / 1000,
+ intel_sdvo->pixel_clock_max / 1000,
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+	/* OSOL i2c_del_adapter(&intel_sdvo->ddc); */
+err_i2c_bus:
+ intel_sdvo_unselect_i2c_bus(intel_sdvo);
+ kfree(intel_sdvo, sizeof(*intel_sdvo));
+
+ return false;
+}
diff --git a/usr/src/uts/intel/io/i915/intel_sdvo_regs.h b/usr/src/uts/intel/io/i915/intel_sdvo_regs.h
new file mode 100644
index 0000000..6c92ae6
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_sdvo_regs.h
@@ -0,0 +1,777 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2007, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _INTEL_SDVO_REGS_H
+#define _INTEL_SDVO_REGS_H
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
+
+#pragma pack(1)
+struct intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __attribute__((packed));
+#pragma pack()
+
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
+/** This matches the EDID DTD structure, more or less */
+#pragma pack(1)
+struct intel_sdvo_dtd {
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+		/** bits 6-7 of the vsync offset, stored at bits 6-7 of this byte */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct intel_sdvo_pixel_clock_range {
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __attribute__((packed));
+#pragma pack()
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+#pragma pack(1)
+struct intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+#pragma pack()
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+#pragma pack(1)
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+#pragma pack()
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+#pragma pack(1)
+struct intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+#pragma pack()
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/** 5 bytes of bit flags for TV formats shared by all TV format functions */
+#pragma pack(1)
+struct intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __attribute__((packed));
+#pragma pack()
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+#pragma pack(1)
+struct intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+#pragma pack()
+
+/* Get supported resolutions with a square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+#pragma pack(1)
+struct intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __attribute__((packed));
+#pragma pack()
+
+/* Get supported power states; returns info for the encoder and monitor,
+   relying on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get power state; returns info for the encoder and monitor, relying on
+   the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+#pragma pack(1)
+struct sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __attribute__((packed));
+#pragma pack()
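+/*
+ * For illustration, the full 10-bit delays can be reassembled from the
+ * split fields above, e.g. for t0:
+ *
+ *	u16 t0_ms = pps->t0 | ((u16)pps->t0_high << 8);
+ */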
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+#pragma pack(1)
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __attribute__((packed));
+#pragma pack()
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+#pragma pack(1)
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __attribute__((packed));
+#pragma pack()
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+#pragma pack(1)
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+#pragma pack()
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+#pragma pack(1)
+struct intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+#pragma pack()
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after the TV format has been
+ * selected.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+#pragma pack(1)
+struct intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __attribute__((packed));
+#pragma pack()
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+#pragma pack(1)
+struct intel_sdvo_enhancements_arg {
+ u16 value;
+} __attribute__((packed));
+#pragma pack()
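+/*
+ * Presumably the value passed to each SET_* command above lies between 0
+ * and the max_value reported by the matching GET_MAX_* command.
+ */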
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 0x0
+ #define SDVO_COLORIMETRY_RGB220 0x1
+ #define SDVO_COLORIMETRY_YCrCb422 0x3
+ #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+#define SDVO_HBUF_INDEX_ELD 0
+#define SDVO_HBUF_INDEX_AVI_IF 1
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
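+/*
+ * Sketch of the expected HBUF flow: select a buffer with
+ * SDVO_CMD_SET_HBUF_INDEX (e.g. SDVO_HBUF_INDEX_AVI_IF), write the payload
+ * with SDVO_CMD_SET_HBUF_DATA, then enable transmission by setting the
+ * rate with SDVO_CMD_SET_HBUF_TXRATE (e.g. SDVO_HBUF_TX_VSYNC).
+ */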
+
+#pragma pack(1)
+struct intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __attribute__((packed));
+#pragma pack()
+#endif /* _INTEL_SDVO_REGS_H */
diff --git a/usr/src/uts/intel/io/i915/intel_sideband.c b/usr/src/uts/intel/io/i915/intel_sideband.c
new file mode 100644
index 0000000..fa32342
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_sideband.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* IOSF sideband */
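+/*
+ * Each access below is a doorbell handshake: wait for IOSF_SB_BUSY to
+ * clear, program VLV_IOSF_ADDR (and VLV_IOSF_DATA for a write), ring
+ * VLV_IOSF_DOORBELL_REQ with the encoded command, then wait for completion
+ * before reading VLV_IOSF_DATA back.
+ */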
+static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
+ u32 port, u32 opcode, u32 addr, u32 *val)
+{
+ u32 cmd, be = 0xf, bar = 0;
+ bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
+ opcode == DPIO_OPCODE_REG_READ);
+
+ cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+ (port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
+ (bar << IOSF_BAR_SHIFT);
+
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+ DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
+ is_read ? "read" : "write");
+ return -EAGAIN;
+ }
+
+ I915_WRITE(VLV_IOSF_ADDR, addr);
+ if (!is_read)
+ I915_WRITE(VLV_IOSF_DATA, *val);
+ I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+ if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+ DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
+ is_read ? "read" : "write");
+ return -ETIMEDOUT;
+ }
+
+ if (is_read)
+ *val = I915_READ(VLV_IOSF_DATA);
+ I915_WRITE(VLV_IOSF_DATA, 0);
+
+ return 0;
+}
+
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
+{
+ u32 val = 0;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_sideband_rw(dev_priv, 16, IOSF_PORT_PUNIT,
+ PUNIT_OPCODE_REG_READ, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return val;
+}
+
+void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_sideband_rw(dev_priv, 16, IOSF_PORT_PUNIT,
+ PUNIT_OPCODE_REG_WRITE, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+{
+ u32 val = 0;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ mutex_lock(&dev_priv->dpio_lock);
+ vlv_sideband_rw(dev_priv, 16, IOSF_PORT_NC,
+ PUNIT_OPCODE_REG_READ, addr, &val);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ return val;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+ DPIO_OPCODE_REG_READ, reg, &val);
+
+ return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
+{
+ vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+ DPIO_OPCODE_REG_WRITE, reg, &val);
+}
+
+/* SBI access */
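+/*
+ * SBI transactions follow a similar doorbell pattern: wait for SBI_BUSY to
+ * clear, program SBI_ADDR (register in the upper 16 bits), kick off the
+ * transaction via SBI_CTL_STAT with destination and opcode, then wait for
+ * completion without SBI_RESPONSE_FAIL.
+ */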
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 value = 0;
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ return 0;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+
+ if (destination == SBI_ICLK)
+ value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+ return 0;
+ }
+
+ return I915_READ(SBI_DATA);
+}
+
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
+{
+ u32 tmp;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ return;
+ }
+
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+
+ if (destination == SBI_ICLK)
+ tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ else
+ tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+ return;
+ }
+}
diff --git a/usr/src/uts/intel/io/i915/intel_sprite.c b/usr/src/uts/intel/io/i915/intel_sprite.c
new file mode 100644
index 0000000..1422105
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_sprite.c
@@ -0,0 +1,1081 @@
+/*
+ * Copyright (c) 2011, 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "drm_rect.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static void
+vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ int x, int y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SP_PIXFORMAT_MASK;
+ sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SP_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUYV:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
+ break;
+ case DRM_FORMAT_RGB565:
+ sprctl |= SP_FORMAT_BGR565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SP_FORMAT_BGRX8888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ sprctl |= SP_FORMAT_BGRA8888;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ sprctl |= SP_FORMAT_RGBX1010102;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ sprctl |= SP_FORMAT_RGBA1010102;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SP_FORMAT_RGBX8888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ sprctl |= SP_FORMAT_RGBA8888;
+ break;
+ default:
+ /*
+ * If we get here one of the upper layers failed to filter
+ * out the unsupported plane formats
+ */
+ BUG();
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SP_TILED;
+
+ sprctl |= SP_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
+
+ I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+ I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+ obj->tiling_mode,
+ pixel_size,
+ fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+ else
+ I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+
+ I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPCNTR(pipe, plane), sprctl);
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+ sprsurf_offset);
+ POSTING_READ(SPSURF(pipe, plane));
+}
+
+static void
+vlv_disable_plane(struct drm_plane *dplane)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+
+ I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
+ ~SP_ENABLE);
+ /* Activate double buffered register update */
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
+ POSTING_READ(SPSURF(pipe, plane));
+}
+
+static int
+vlv_update_colorkey(struct drm_plane *dplane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+ sprctl &= ~SP_SOURCE_KEY;
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+ I915_WRITE(SPCNTR(pipe, plane), sprctl);
+
+ POSTING_READ(SPKEYMSK(pipe, plane));
+
+ return 0;
+}
+
+static void
+vlv_get_colorkey(struct drm_plane *dplane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ int pipe = intel_plane->pipe;
+ int plane = intel_plane->plane;
+ u32 sprctl;
+
+ key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
+ key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
+ key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
+
+ sprctl = I915_READ(SPCNTR(pipe, plane));
+ if (sprctl & SP_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ int x, int y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ u32 sprctl, sprscale = 0;
+ unsigned long sprsurf_offset, linear_offset;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ bool scaling_was_enabled = (bool)dev_priv->sprite_scaling_enabled;
+
+ sprctl = I915_READ(SPRCTL(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SPRITE_PIXFORMAT_MASK;
+ sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+ sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SPRITE_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_YUYV:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SPRITE_TILED;
+
+ /* trickle feed must be disabled */
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ sprctl |= SPRITE_ENABLE;
+
+ if (IS_HASWELL(dev))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ */
+ if (crtc_w != src_w || crtc_h != src_h) {
+ dev_priv->sprite_scaling_enabled |= 1 << pipe;
+
+ if (!scaling_was_enabled) {
+ intel_update_watermarks(dev);
+ intel_wait_for_vblank(dev, pipe);
+ }
+ sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+ } else
+ dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ sprsurf_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= sprsurf_offset;
+
+ /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+ * register */
+ if (IS_HASWELL(dev))
+ I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+ else if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+ else
+ I915_WRITE(SPRLINOFF(pipe), linear_offset);
+
+ I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), sprscale);
+ I915_WRITE(SPRCTL(pipe), sprctl);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+ POSTING_READ(SPRSURF(pipe));
+
+ /* potentially re-enable LP watermarks */
+ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+ intel_update_watermarks(dev);
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ bool scaling_was_enabled = (bool)dev_priv->sprite_scaling_enabled;
+
+ I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+ /* Can't leave the scaler enabled... */
+ if (intel_plane->can_scale)
+ I915_WRITE(SPRSCALE(pipe), 0);
+ /* Activate double buffered register update */
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
+ POSTING_READ(SPRSURF(pipe));
+
+ dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+
+ intel_update_sprite_watermarks(dev, pipe, 0, 0, false);
+
+ /* potentially re-enable LP watermarks */
+ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+ intel_update_watermarks(dev);
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+ sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+ I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+ POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+ if (sprctl & SPRITE_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (sprctl & SPRITE_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ int x, int y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ unsigned long dvssurf_offset, linear_offset;
+ u32 dvscntr, dvsscale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ dvscntr = I915_READ(DVSCNTR(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ dvscntr &= ~DVS_PIXFORMAT_MASK;
+ dvscntr &= ~DVS_RGB_ORDER_XBGR;
+ dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+ dvscntr &= ~DVS_TILED;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dvscntr |= DVS_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_YUYV:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ break;
+ default:
+ BUG();
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dvscntr |= DVS_TILED;
+
+ if (IS_GEN6(dev))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
+ dvscntr |= DVS_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
+
+ dvsscale = 0;
+ if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
+ dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
+ dvssurf_offset =
+ intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+ pixel_size, fb->pitches[0]);
+ linear_offset -= dvssurf_offset;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+ else
+ I915_WRITE(DVSLINOFF(pipe), linear_offset);
+
+ I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(DVSSCALE(pipe), dvsscale);
+ I915_WRITE(DVSCNTR(pipe), dvscntr);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+ilk_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ /* Disable the scaler */
+ I915_WRITE(DVSSCALE(pipe), 0);
+ /* Flush double buffered register updates */
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ if (!intel_crtc->primary_disabled)
+ return;
+
+ intel_crtc->primary_disabled = false;
+ intel_update_fbc(dev);
+
+ I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+static void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ if (intel_crtc->primary_disabled)
+ return;
+
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+
+ intel_crtc->primary_disabled = true;
+ intel_update_fbc(dev);
+}
+
+static int
+ilk_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+ dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+ I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+ POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+ if (dvscntr & DVS_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (dvscntr & DVS_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static bool
+format_is_yuv(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YVYU:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj, *old_obj;
+ int pipe = intel_plane->pipe;
+ enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+ pipe);
+ int ret = 0;
+ bool disable_primary = false;
+ bool visible;
+ int hscale, vscale;
+ int max_scale, min_scale;
+ int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+ struct drm_rect src = {
+ /* sample coordinates in 16.16 fixed point */
+ .x1 = src_x,
+ .x2 = src_x + src_w,
+ .y1 = src_y,
+ .y2 = src_y + src_h,
+ };
+ struct drm_rect dst = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .x2 = crtc_x + crtc_w,
+ .y1 = crtc_y,
+ .y2 = crtc_y + crtc_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
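+ /*
+ * The src_* coordinates are in 16.16 fixed point, so e.g. a source
+ * viewport 640 pixels wide arrives here as src_w = 640 << 16.
+ */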
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ old_obj = intel_plane->obj;
+
+ intel_plane->crtc_x = crtc_x;
+ intel_plane->crtc_y = crtc_y;
+ intel_plane->crtc_w = crtc_w;
+ intel_plane->crtc_h = crtc_h;
+ intel_plane->src_x = src_x;
+ intel_plane->src_y = src_y;
+ intel_plane->src_w = src_w;
+ intel_plane->src_h = src_h;
+
+ /* Pipe must be running... */
+ if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
+ DRM_DEBUG_KMS("Pipe disabled\n");
+ return -EINVAL;
+ }
+
+ /* Don't modify another pipe's plane */
+ if (intel_plane->pipe != intel_crtc->pipe) {
+ DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
+ return -EINVAL;
+ }
+
+ /* FIXME check all gen limits */
+ if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
+ DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
+ return -EINVAL;
+ }
+
+ /* Sprite planes can be linear or x-tiled surfaces */
+ switch (obj->tiling_mode) {
+ case I915_TILING_NONE:
+ case I915_TILING_X:
+ break;
+ default:
+ DRM_DEBUG_KMS("Unsupported tiling mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME the following code does a bunch of fuzzy adjustments to the
+ * coordinates and sizes. We probably need some way to decide whether
+ * more strict checking should be done instead.
+ */
+ max_scale = intel_plane->max_downscale << 16;
+ min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+
+ hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
+ BUG_ON(hscale < 0);
+
+ vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
+ BUG_ON(vscale < 0);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
+
+ crtc_x = dst.x1;
+ crtc_y = dst.y1;
+ crtc_w = drm_rect_width(&dst);
+ crtc_h = drm_rect_height(&dst);
+
+ if (visible) {
+ /* check again in case clipping clamped the results */
+ hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
+ if (hscale < 0) {
+ DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
+ drm_rect_debug_print(&src, true);
+ drm_rect_debug_print(&dst, false);
+
+ return hscale;
+ }
+
+ vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
+ if (vscale < 0) {
+ DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
+ drm_rect_debug_print(&src, true);
+ drm_rect_debug_print(&dst, false);
+
+ return vscale;
+ }
+
+ /* Make the source viewport size an exact multiple of the scaling factors. */
+ drm_rect_adjust_size(&src,
+ drm_rect_width(&dst) * hscale - drm_rect_width(&src),
+ drm_rect_height(&dst) * vscale - drm_rect_height(&src));
+
+ /* sanity check to make sure the src viewport wasn't enlarged */
+ WARN_ON(src.x1 < (int) src_x ||
+ src.y1 < (int) src_y ||
+ src.x2 > (int) (src_x + src_w) ||
+ src.y2 > (int) (src_y + src_h));
+
+ /*
+ * Hardware doesn't handle subpixel coordinates.
+ * Adjust to (macro)pixel boundary, but be careful not to
+ * increase the source viewport size, because that could
+ * push the downscaling factor out of bounds.
+ */
+ src_x = src.x1 >> 16;
+ src_w = drm_rect_width(&src) >> 16;
+ src_y = src.y1 >> 16;
+ src_h = drm_rect_height(&src) >> 16;
+
+ if (format_is_yuv(fb->pixel_format)) {
+ src_x &= ~1;
+ src_w &= ~1;
+
+ /*
+ * Must keep src and dst the
+ * same if we can't scale.
+ */
+ if (!intel_plane->can_scale)
+ crtc_w &= ~1;
+
+ if (crtc_w == 0)
+ visible = false;
+ }
+ }
+
+ /* Check size restrictions when scaling */
+ if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+ unsigned int width_bytes;
+
+ WARN_ON(!intel_plane->can_scale);
+
+ /* FIXME interlacing min height is 6 */
+
+ if (crtc_w < 3 || crtc_h < 3)
+ visible = false;
+
+ if (src_w < 3 || src_h < 3)
+ visible = false;
+
+ width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+
+ if (src_w > 2048 || src_h > 2048 ||
+ width_bytes > 4096 || fb->pitches[0] > 4096) {
+ DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
+ return -EINVAL;
+ }
+ }
+
+ dst.x1 = crtc_x;
+ dst.x2 = crtc_x + crtc_w;
+ dst.y1 = crtc_y;
+ dst.y2 = crtc_y + crtc_h;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ disable_primary = drm_rect_equals(&dst, &clip);
+ WARN_ON(disable_primary && !visible);
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Note that this will apply the VT-d workaround for scanouts,
+ * which is more restrictive than required for sprites. (The
+ * primary plane requires 256KiB alignment with 64 PTE padding;
+ * the sprite planes only require 128KiB alignment and 32 PTE
+ * padding.)
+ */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret)
+ goto out_unlock;
+
+ intel_plane->obj = obj;
+
+ /*
+ * Be sure to re-enable the primary before the sprite is no longer
+ * covering it fully.
+ */
+ if (!disable_primary)
+ intel_enable_primary(crtc);
+
+ if (visible)
+ intel_plane->update_plane(plane, fb, obj,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ else
+ intel_plane->disable_plane(plane);
+
+ if (disable_primary)
+ intel_disable_primary(crtc);
+
+ /* Unpin old obj after new one is active to avoid ugliness */
+ if (old_obj) {
+ /*
+ * It's fairly common to simply update the position of
+ * an existing object. In that case, we don't need to
+ * wait for vblank to avoid ugliness, we only need to
+ * do the pin & ref bookkeeping.
+ */
+ if (old_obj != obj) {
+ mutex_unlock(&dev->struct_mutex);
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ mutex_lock(&dev->struct_mutex);
+ }
+ intel_unpin_fb_obj(old_obj);
+ }
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int ret = 0;
+
+ if (plane->crtc)
+ intel_enable_primary(plane->crtc);
+ intel_plane->disable_plane(plane);
+
+ if (!intel_plane->obj)
+ goto out;
+
+ intel_wait_for_vblank(dev, intel_plane->pipe);
+
+ mutex_lock(&dev->struct_mutex);
+ intel_unpin_fb_obj(intel_plane->obj);
+ intel_plane->obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+out:
+
+ return ret;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ intel_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(intel_plane, sizeof(struct intel_plane));
+}
+
+int intel_sprite_set_colorkey(DRM_IOCTL_ARGS)
+{
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+int intel_sprite_get_colorkey(DRM_IOCTL_ARGS)
+{
+ struct drm_intel_sprite_colorkey *get = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+void intel_plane_restore(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ if (!plane->crtc || !plane->fb)
+ return;
+
+ intel_update_plane(plane, plane->crtc, plane->fb,
+ intel_plane->crtc_x, intel_plane->crtc_y,
+ intel_plane->crtc_w, intel_plane->crtc_h,
+ intel_plane->src_x, intel_plane->src_y,
+ intel_plane->src_w, intel_plane->src_h);
+}
+
+void intel_plane_disable(struct drm_plane *plane)
+{
+ if (!plane->crtc || !plane->fb)
+ return;
+
+ intel_disable_plane(plane);
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_plane,
+ .destroy = intel_destroy_plane,
+};
+
+static uint32_t ilk_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static uint32_t snb_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static uint32_t vlv_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+{
+ struct intel_plane *intel_plane;
+ unsigned long possible_crtcs;
+ const uint32_t *plane_formats;
+ int num_plane_formats;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen < 5)
+ return -ENODEV;
+
+ intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+ if (!intel_plane)
+ return -ENOMEM;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ case 6:
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 16;
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ intel_plane->update_colorkey = ilk_update_colorkey;
+ intel_plane->get_colorkey = ilk_get_colorkey;
+
+ if (IS_GEN6(dev)) {
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+ }
+ break;
+
+ case 7:
+ if (IS_IVYBRIDGE(dev)) {
+ intel_plane->can_scale = true;
+ intel_plane->max_downscale = 2;
+ } else {
+ intel_plane->can_scale = false;
+ intel_plane->max_downscale = 1;
+ }
+
+ if (IS_VALLEYVIEW(dev)) {
+ intel_plane->update_plane = vlv_update_plane;
+ intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->update_colorkey = vlv_update_colorkey;
+ intel_plane->get_colorkey = vlv_get_colorkey;
+
+ plane_formats = vlv_plane_formats;
+ num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ } else {
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->update_colorkey = ivb_update_colorkey;
+ intel_plane->get_colorkey = ivb_get_colorkey;
+
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ }
+ break;
+
+ default:
+ kfree(intel_plane, sizeof(struct intel_plane));
+ return -ENODEV;
+ }
+
+ intel_plane->pipe = pipe;
+ intel_plane->plane = plane;
+ possible_crtcs = (1 << pipe);
+ ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ false);
+ if (ret)
+ kfree(intel_plane, sizeof(struct intel_plane));
+
+ return ret;
+}
+
diff --git a/usr/src/uts/intel/io/i915/intel_tv.c b/usr/src/uts/intel/io/i915/intel_tv.c
new file mode 100644
index 0000000..82d6b5f
--- /dev/null
+++ b/usr/src/uts/intel/io/i915/intel_tv.c
@@ -0,0 +1,1683 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2013 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file
+ * Integrated TV-out support for the 915GM and 945GM.
+ */
+
+#include <sys/ddi.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+enum tv_margin {
+ TV_MARGIN_LEFT, TV_MARGIN_TOP,
+ TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
+};
+
+/** Private structure for the integrated TV support */
+struct intel_tv {
+ struct intel_encoder base;
+
+ int type;
+ const char *tv_format;
+ int margin[4];
+ u32 save_TV_H_CTL_1;
+ u32 save_TV_H_CTL_2;
+ u32 save_TV_H_CTL_3;
+ u32 save_TV_V_CTL_1;
+ u32 save_TV_V_CTL_2;
+ u32 save_TV_V_CTL_3;
+ u32 save_TV_V_CTL_4;
+ u32 save_TV_V_CTL_5;
+ u32 save_TV_V_CTL_6;
+ u32 save_TV_V_CTL_7;
+ u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
+
+ u32 save_TV_CSC_Y;
+ u32 save_TV_CSC_Y2;
+ u32 save_TV_CSC_U;
+ u32 save_TV_CSC_U2;
+ u32 save_TV_CSC_V;
+ u32 save_TV_CSC_V2;
+ u32 save_TV_CLR_KNOBS;
+ u32 save_TV_CLR_LEVEL;
+ u32 save_TV_WIN_POS;
+ u32 save_TV_WIN_SIZE;
+ u32 save_TV_FILTER_CTL_1;
+ u32 save_TV_FILTER_CTL_2;
+ u32 save_TV_FILTER_CTL_3;
+
+ u32 save_TV_H_LUMA[60];
+ u32 save_TV_H_CHROMA[60];
+ u32 save_TV_V_LUMA[43];
+ u32 save_TV_V_CHROMA[43];
+
+ u32 save_TV_DAC;
+ u32 save_TV_CTL;
+};
+
+struct video_levels {
+ int blank, black, burst;
+};
+
+struct color_conversion {
+ u16 ry, gy, by, ay;
+ u16 ru, gu, bu, au;
+ u16 rv, gv, bv, av;
+};
+
+static const u32 filter_table[] = {
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100, 0x36403000,
+ 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100,
+};
+
+/*
+ * Color conversion values have 3 separate fixed point formats:
+ *
+ * 10 bit fields (ay, au)
+ * 1.9 fixed point (b.bbbbbbbbb)
+ * 11 bit fields (ry, by, ru, gu, gv)
+ * exp.mantissa (ee.mmmmmmmmm)
+ * ee = 00 = 10^-1 (0.mmmmmmmmm)
+ * ee = 01 = 10^-2 (0.0mmmmmmmmm)
+ * ee = 10 = 10^-3 (0.00mmmmmmmmm)
+ * ee = 11 = 10^-4 (0.000mmmmmmmmm)
+ * 12 bit fields (gy, rv, bu)
+ * exp.mantissa (eee.mmmmmmmmm)
+ * eee = 000 = 10^-1 (0.mmmmmmmmm)
+ * eee = 001 = 10^-2 (0.0mmmmmmmmm)
+ * eee = 010 = 10^-3 (0.00mmmmmmmmm)
+ * eee = 011 = 10^-4 (0.000mmmmmmmmm)
+ * eee = 100 = reserved
+ * eee = 101 = reserved
+ * eee = 110 = reserved
+ * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
+ *
+ * Saturation and contrast are 8 bits, with their own representation:
+ * 8 bit field (saturation, contrast)
+ * exp.mantissa (ee.mmmmmm)
+ * ee = 00 = 10^-1 (0.mmmmmm)
+ * ee = 01 = 10^0 (m.mmmmm)
+ * ee = 10 = 10^1 (mm.mmmm)
+ * ee = 11 = 10^2 (mmm.mmm)
+ *
+ * Simple conversion function:
+ *
+ * static u32
+ * float_to_csc_11(float f)
+ * {
+ * u32 exp;
+ * u32 mant;
+ * u32 ret;
+ *
+ * if (f < 0)
+ * f = -f;
+ *
+ * if (f >= 1) {
+ * exp = 0x7;
+ * mant = 1 << 8;
+ * } else {
+ * for (exp = 0; exp < 3 && f < 0.5; exp++)
+ * f *= 2.0;
+ * mant = (f * (1 << 9) + 0.5);
+ * if (mant >= (1 << 9))
+ * mant = (1 << 9) - 1;
+ * }
+ * ret = (exp << 9) | mant;
+ * return ret;
+ * }
+ */
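+/*
+ * For illustration, a hypothetical inverse of the sample above, decoding a
+ * stored field back to a float:
+ *
+ * static float
+ * csc_to_float(u32 v)
+ * {
+ *	u32 exp = (v >> 9) & 0x7;
+ *	u32 mant = v & 0x1ff;
+ *
+ *	if (exp == 0x7)
+ *		return (float)mant / (1 << 8);	// m.mmmmmmmm (1.0 form)
+ *	return (float)mant / (1 << (9 + exp));
+ * }
+ */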
+
+/*
+ * Behold, magic numbers! If we plant them they might grow a big
+ * s-video cable to the sky... or something.
+ *
+ * Pre-converted to appropriate hex value.
+ */
+
+/*
+ * PAL & NTSC values for composite & s-video connections
+ */
+static const struct color_conversion ntsc_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion ntsc_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion ntsc_j_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
+ .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+ .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_composite = {
+ .blank = 225, .black = 225, .burst = 113,
+};
+
+static const struct color_conversion ntsc_j_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
+ .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+ .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_svideo = {
+ .blank = 266, .black = 266, .burst = 133,
+};
+
+static const struct color_conversion pal_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
+ .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+ .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_composite = {
+ .blank = 237, .black = 237, .burst = 118,
+};
+
+static const struct color_conversion pal_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+ .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_svideo = {
+ .blank = 280, .black = 280, .burst = 139,
+};
+
+static const struct color_conversion pal_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion pal_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion pal_n_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_composite = {
+ .blank = 225, .black = 267, .burst = 118,
+};
+
+static const struct color_conversion pal_n_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 139,
+};
+
+/*
+ * Component connections
+ */
+static const struct color_conversion sdtv_csc_yprpb = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
+};
+
+static const struct color_conversion sdtv_csc_rgb = {
+ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct color_conversion hdtv_csc_yprpb = {
+ .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+ .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
+};
+
+static const struct color_conversion hdtv_csc_rgb = {
+ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct video_levels component_levels = {
+ .blank = 279, .black = 279, .burst = 0,
+};
+
+struct tv_mode {
+ const char *name;
+ int clock;
+ int refresh; /* in millihertz (for precision) */
+ u32 oversample;
+ int hsync_end, hblank_start, hblank_end, htotal;
+ bool progressive, trilevel_sync, component_only;
+ int vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena;
+ int veq_start_f1, veq_start_f2, veq_len;
+ int vi_end_f1, vi_end_f2, nbr_end;
+ bool burst_ena;
+ int hburst_start, hburst_len;
+ int vburst_start_f1, vburst_end_f1;
+ int vburst_start_f2, vburst_end_f2;
+ int vburst_start_f3, vburst_end_f3;
+ int vburst_start_f4, vburst_end_f4;
+ /*
+ * subcarrier programming
+ */
+ int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+ u32 sc_reset;
+ bool pal_burst;
+ /*
+ * blank/black levels
+ */
+ const struct video_levels *composite_levels, *svideo_levels;
+ const struct color_conversion *composite_color, *svideo_color;
+ const u32 *filter_table;
+ int max_srcw;
+};
+
+/*
+ * Sub carrier DDA
+ *
+ * I think this works as follows:
+ *
+ * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
+ *
+ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
+ *
+ * So,
+ * dda1_ideal = subcarrier/pixel * 4096
+ * dda1_inc = floor (dda1_ideal)
+ * dda2 = dda1_ideal - dda1_inc
+ *
+ * then pick a ratio for dda2 that gives the closest approximation. If
+ * you can't get close enough, you can play with dda3 as well. This
+ * seems likely to happen when dda2 is small as the jumps would be larger
+ *
+ * To invert this,
+ *
+ * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
+ *
+ * The constants below were all computed using a 107.520MHz clock
+ */
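+
+/*
+ * A sketch of the derivation above (hypothetical helper, not used by
+ * the driver; it assumes the formula quoted above and ignores dda3):
+ *
+ * static void
+ * tv_sc_dda_calc(double subcarrier, double pixel_clock,
+ *     int dda2_size, int *dda1_inc, int *dda2_inc)
+ * {
+ * double ideal = subcarrier / pixel_clock * 4096.0;
+ *
+ * *dda1_inc = (int)ideal;
+ * *dda2_inc = (int)((ideal - *dda1_inc) * dda2_size + 0.5);
+ * }
+ */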
+
+/**
+ * Register programming values for TV modes.
+ *
+ * These values already include the -1 adjustments that the hardware
+ * registers require.
+ */
+
+static const struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC-M",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = false,
+ /* 525 lines, 60 fields, 15.734 kHz line, sub-carrier 3.580 MHz */
+
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-443",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = false,
+ /* 525 lines, 60 fields, 15.734 kHz line, sub-carrier 4.43 MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 4093, .dda2_size = 27456,
+ .dda3_inc = 310, .dda3_size = 525,
+ .sc_reset = TV_SC_RESET_NEVER,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-J",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = false,
+
+ /* 525 lines, 60 fields, 15.734 kHz line, sub-carrier 3.580 MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_j_levels_composite,
+ .composite_color = &ntsc_j_csc_composite,
+ .svideo_levels = &ntsc_j_levels_svideo,
+ .svideo_color = &ntsc_j_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "PAL-M",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = false,
+
+ /* 525 lines, 60 fields, 15.734 kHz line, sub-carrier 3.580 MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 16704, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_m_levels_composite,
+ .composite_color = &pal_m_csc_composite,
+ .svideo_levels = &pal_m_levels_svideo,
+ .svideo_color = &pal_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 lines, 50 fields, 15.625 kHz line, sub-carrier 3.582 MHz */
+ .name = "PAL-N",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = false,
+
+ .hsync_end = 64, .hblank_end = 128,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 34,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+ /* desired 3.5820562 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 23578, .dda2_size = 27648,
+ .dda3_inc = 134, .dda3_size = 625,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_n_levels_composite,
+ .composite_color = &pal_n_csc_composite,
+ .svideo_levels = &pal_n_levels_svideo,
+ .svideo_color = &pal_n_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 lines, 50 fields, 15.625 kHz line, sub-carrier 4.434 MHz */
+ .name = "PAL",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = false,
+
+ .hsync_end = 64, .hblank_end = 142,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 5, .vsync_start_f2 = 6,
+ .vsync_len = 5,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 15,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 32,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 4122, .dda2_size = 27648,
+ .dda3_inc = 67, .dda3_size = 625,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_levels_composite,
+ .composite_color = &pal_csc_composite,
+ .svideo_levels = &pal_levels_svideo,
+ .svideo_color = &pal_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "480p",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = true,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = true,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@60Hz",
+ .clock = 148800,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = true,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1649,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@50Hz",
+ .clock = 148800,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = true,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1979,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ .max_srcw = 800,
+ },
+ {
+ .name = "1080i@50Hz",
+ .clock = 148800,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2639,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "1080i@60Hz",
+ .clock = 148800,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+};
+
+static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_tv,
+ base);
+}
+
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ(TV_CTL);
+
+ if (!(tmp & TV_ENC_ENABLE))
+ return false;
+
+ *pipe = PORT_TO_PIPE(tmp);
+
+ return true;
+}
+
+static void
+intel_enable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+}
+
+static const struct tv_mode *
+intel_tv_mode_lookup(const char *tv_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ const struct tv_mode *tv_mode = &tv_modes[i];
+
+ if (!strcmp(tv_format, tv_mode->name))
+ return tv_mode;
+ }
+ return NULL;
+}
+
+static const struct tv_mode *
+intel_tv_mode_find(struct intel_tv *intel_tv)
+{
+ return intel_tv_mode_lookup(intel_tv->tv_format);
+}
+
+static int /* OSOL_i915 */
+intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ /* Ensure TV refresh is close to desired refresh */
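+ /*
+ * e.g. NTSC-M reports a refresh of 59940 mHz, so a 60 Hz mode
+ * passes: |59940 - 60 * 1000| = 60 < 1000.
+ */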
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
+ < 1000)
+ return MODE_OK;
+
+ return MODE_CLOCK_RANGE;
+}
+
+static bool
+intel_tv_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ if (!tv_mode)
+ return false;
+
+ pipe_config->adjusted_mode.clock = tv_mode->clock;
+ DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+ pipe_config->pipe_bpp = 8*3;
+
+ return true;
+}
+
+static void
+/* LINTED */
+intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ /* LINTED */
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ u32 tv_ctl;
+ u32 hctl1, hctl2, hctl3;
+ u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+ u32 scctl1, scctl2, scctl3;
+ int i, j;
+ const struct video_levels *video_levels;
+ const struct color_conversion *color_conversion;
+ bool burst_ena;
+ int pipe = intel_crtc->pipe;
+
+ if (!tv_mode)
+ return; /* can't happen (compute_config prevents this) */
+
+ tv_ctl = I915_READ(TV_CTL);
+ tv_ctl &= TV_CTL_SAVE;
+
+ switch (intel_tv->type) {
+ default:
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_Composite:
+ tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+ video_levels = tv_mode->composite_levels;
+ color_conversion = tv_mode->composite_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ case DRM_MODE_CONNECTOR_Component:
+ tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+ video_levels = &component_levels;
+ if (tv_mode->burst_ena)
+ color_conversion = &sdtv_csc_yprpb;
+ else
+ color_conversion = &hdtv_csc_yprpb;
+ burst_ena = false;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+ video_levels = tv_mode->svideo_levels;
+ color_conversion = tv_mode->svideo_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ }
+ hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+ (tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+ hctl2 = (tv_mode->hburst_start << 16) |
+ (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+ if (burst_ena)
+ hctl2 |= TV_BURST_ENA;
+
+ hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+ (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+ vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+ (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+ (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+ vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+ (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+ (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+ vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+ (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+ (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+ if (tv_mode->veq_ena)
+ vctl3 |= TV_EQUAL_ENA;
+
+ vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+ (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+ vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+ (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+ vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+ (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+ vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+ (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ tv_ctl |= tv_mode->oversample;
+
+ if (tv_mode->progressive)
+ tv_ctl |= TV_PROGRESSIVE;
+ if (tv_mode->trilevel_sync)
+ tv_ctl |= TV_TRILEVEL_SYNC;
+ if (tv_mode->pal_burst)
+ tv_ctl |= TV_PAL_BURST;
+
+ scctl1 = 0;
+ if (tv_mode->dda1_inc)
+ scctl1 |= TV_SC_DDA1_EN;
+ if (tv_mode->dda2_inc)
+ scctl1 |= TV_SC_DDA2_EN;
+ if (tv_mode->dda3_inc)
+ scctl1 |= TV_SC_DDA3_EN;
+ scctl1 |= tv_mode->sc_reset;
+ if (video_levels)
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
+
+ scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
+ tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
+
+ scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
+ tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
+
+ /* Enable two fixes for the chips that need them. */
+ if (dev->pci_device < 0x2772)
+ tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
+
+ I915_WRITE(TV_H_CTL_1, hctl1);
+ I915_WRITE(TV_H_CTL_2, hctl2);
+ I915_WRITE(TV_H_CTL_3, hctl3);
+ I915_WRITE(TV_V_CTL_1, vctl1);
+ I915_WRITE(TV_V_CTL_2, vctl2);
+ I915_WRITE(TV_V_CTL_3, vctl3);
+ I915_WRITE(TV_V_CTL_4, vctl4);
+ I915_WRITE(TV_V_CTL_5, vctl5);
+ I915_WRITE(TV_V_CTL_6, vctl6);
+ I915_WRITE(TV_V_CTL_7, vctl7);
+ I915_WRITE(TV_SC_CTL_1, scctl1);
+ I915_WRITE(TV_SC_CTL_2, scctl2);
+ I915_WRITE(TV_SC_CTL_3, scctl3);
+
+ if (color_conversion) {
+ I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+ color_conversion->gy);
+ I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+ color_conversion->ay);
+ I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+ color_conversion->gu);
+ I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+ color_conversion->au);
+ I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+ color_conversion->gv);
+ I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+ color_conversion->av);
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ else
+ I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
+ if (video_levels)
+ I915_WRITE(TV_CLR_LEVEL,
+ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
+ (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+ {
+ int pipeconf_reg = PIPECONF(pipe);
+ int dspcntr_reg = DSPCNTR(intel_crtc->plane);
+ int pipeconf = I915_READ(pipeconf_reg);
+ int dspcntr = I915_READ(dspcntr_reg);
+ int xpos = 0x0, ypos = 0x0;
+ unsigned int xsize, ysize;
+ /* Pipe must be off here */
+ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
+
+ /* Wait for vblank for the disable to take effect */
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
+ /* Wait for vblank for the disable to take effect. */
+ intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+ xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+ if (tv_mode->progressive)
+ ysize = tv_mode->nbr_end + 1;
+ else
+ ysize = 2*tv_mode->nbr_end + 1;
+
+ xpos += intel_tv->margin[TV_MARGIN_LEFT];
+ ypos += intel_tv->margin[TV_MARGIN_TOP];
+ xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
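+ /*
+ * e.g. NTSC-M (hblank_start 836, hblank_end 124, nbr_end 240,
+ * interlaced) with the default margins set in intel_tv_init
+ * (54/46 left/right, 36/37 top/bottom):
+ * xsize = 712 - 100 = 612, ysize = 481 - 73 = 408, and the
+ * window sits at (54, 36).
+ */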
+ I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
+ I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+
+ I915_WRITE(pipeconf_reg, pipeconf);
+ I915_WRITE(dspcntr_reg, dspcntr);
+ intel_flush_display_plane(dev_priv, intel_crtc->plane);
+ }
+
+ j = 0;
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+ I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
+ I915_WRITE(TV_CTL, tv_ctl);
+}
+
+static const struct drm_display_mode reported_modes[] = {
+ {
+ .name = "NTSC 480i",
+ .clock = 107520,
+ .hdisplay = 1280,
+ .hsync_start = 1368,
+ .hsync_end = 1496,
+ .htotal = 1712,
+
+ .vdisplay = 1024,
+ .vsync_start = 1027,
+ .vsync_end = 1034,
+ .vtotal = 1104,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+};
+
+/**
+ * Detect TV presence by performing load detection.
+ *
+ * Requires that the current pipe's DPLL is active.
+ *
+ * \return the detected DRM_MODE_CONNECTOR_* type, or -1 if no TV is
+ * connected.
+ */
+static int
+intel_tv_detect_type(struct intel_tv *intel_tv,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = &intel_tv->base.base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ u32 tv_ctl, save_tv_ctl;
+ u32 tv_dac, save_tv_dac;
+ int type;
+
+ /* Disable TV interrupts around load detect or we'll recurse */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ save_tv_dac = tv_dac = I915_READ(TV_DAC);
+ save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+ /* Poll for TV detection */
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ else
+ tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+
+ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+
+ /*
+ * The TV sense state should be cleared to zero on the Cantiga
+ * platform, otherwise the TV is misdetected. This is a hardware
+ * requirement.
+ */
+ if (IS_GM45(dev))
+ tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ POSTING_READ(TV_DAC);
+
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+ type = -1;
+ tv_dac = I915_READ(TV_DAC);
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ type = -1;
+ }
+
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+ POSTING_READ(TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+ /* Restore interrupt config */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ return type;
+}
+
+/*
+ * Set an accurate TV format according to the connector type,
+ * i.e. a Component connection should not be assigned an NTSC
+ * or PAL format.
+ */
+static void intel_tv_find_better_format(struct drm_connector *connector)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int i;
+
+ if (tv_mode == NULL ||
+ (intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ tv_mode->component_only)
+ return;
+
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ tv_mode = tv_modes + i;
+
+ if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ tv_mode->component_only)
+ break;
+ }
+
+ intel_tv->tv_format = tv_mode->name;
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.tv_mode_property, i);
+}
+
+/**
+ * Detect the TV connection.
+ *
+ * A pipe must be programmed in order to probe the TV reliably, so
+ * when forced we borrow one via load detection; otherwise we return
+ * the cached connector status.
+ */
+static enum drm_connector_status
+intel_tv_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_display_mode mode;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ int type;
+
+ mode = reported_modes[0];
+
+ if (force) {
+ struct intel_load_detect_pipe tmp;
+
+ if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
+ type = intel_tv_detect_type(intel_tv, connector);
+ intel_release_load_detect_pipe(connector, &tmp);
+ } else
+ return connector_status_unknown;
+ } else
+ return connector->status;
+
+ if (type < 0)
+ return connector_status_disconnected;
+
+ intel_tv->type = type;
+ intel_tv_find_better_format(connector);
+
+ return connector_status_connected;
+}
+
+static const struct input_res {
+ const char *name;
+ int w, h;
+} input_res_table[] = {
+ {"640x480", 640, 480},
+ {"800x600", 800, 600},
+ {"1024x768", 1024, 768},
+ {"1280x1024", 1280, 1024},
+ {"848x480", 848, 480},
+ {"1280x720", 1280, 720},
+ {"1920x1080", 1920, 1080},
+};
+
+/*
+ * Choose the preferred mode according to the line count of the
+ * TV format.
+ */
+static void
+intel_tv_chose_preferred_modes(struct drm_connector *connector,
+ struct drm_display_mode *mode_ptr)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+ if (tv_mode == NULL)
+ mode_ptr->type |= DRM_MODE_TYPE_BUILTIN;
+ else if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ else if (tv_mode->nbr_end > 480) {
+ if (tv_mode->progressive == true && tv_mode->nbr_end < 720) {
+ if (mode_ptr->vdisplay == 720)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ } else if (mode_ptr->vdisplay == 1080)
+ mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+ }
+}
+
+/**
+ * Build a fixed set of input modes for the current TV format.
+ *
+ * Modes cannot be probed from a TV connection, so candidates are
+ * synthesised from a table of common input resolutions instead.
+ */
+static int
+intel_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode_ptr;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ int j, count = 0;
+ u64 tmp;
+
+ if (tv_mode == NULL)
+ return 0;
+
+ for (j = 0; j < ARRAY_SIZE(input_res_table); j++) {
+ const struct input_res *input = &input_res_table[j];
+ unsigned int hactive_s = input->w;
+ unsigned int vactive_s = input->h;
+
+ if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+ continue;
+
+ if (input->w > 1024 && (!tv_mode->progressive
+ && !tv_mode->component_only))
+ continue;
+
+ mode_ptr = drm_mode_create(connector->dev);
+ if (!mode_ptr)
+ continue;
+ (void) strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+
+ mode_ptr->hdisplay = hactive_s;
+ mode_ptr->hsync_start = hactive_s + 1;
+ mode_ptr->hsync_end = hactive_s + 64;
+ if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
+ mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
+ mode_ptr->htotal = hactive_s + 96;
+
+ mode_ptr->vdisplay = vactive_s;
+ mode_ptr->vsync_start = vactive_s + 1;
+ mode_ptr->vsync_end = vactive_s + 32;
+ if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
+ mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
+ mode_ptr->vtotal = vactive_s + 33;
+
+ tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+ tmp *= mode_ptr->htotal;
+ tmp = div_u64(tmp, 1000000);
+ mode_ptr->clock = (int) tmp;
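+ /*
+ * e.g. the 640x480 entry on NTSC-M: htotal = 736, vtotal = 513,
+ * so clock = 59940 * 513 * 736 / 10^6 ~= 22631 kHz.
+ */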
+
+ mode_ptr->type = DRM_MODE_TYPE_DRIVER;
+ intel_tv_chose_preferred_modes(connector, mode_ptr);
+ drm_mode_probed_add(connector, mode_ptr);
+ count++;
+ }
+
+ return count;
+}
+
+static void
+intel_tv_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(connector, sizeof(struct intel_connector));
+}
+
+static int
+intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_tv *intel_tv = intel_attached_tv(connector);
+ struct drm_crtc *crtc = intel_tv->base.base.crtc;
+ int ret = 0;
+ bool changed = false;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret < 0)
+ goto out;
+
+ if (property == dev->mode_config.tv_left_margin_property &&
+ intel_tv->margin[TV_MARGIN_LEFT] != val) {
+ intel_tv->margin[TV_MARGIN_LEFT] = (int) val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_right_margin_property &&
+ intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+ intel_tv->margin[TV_MARGIN_RIGHT] = (int) val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_top_margin_property &&
+ intel_tv->margin[TV_MARGIN_TOP] != val) {
+ intel_tv->margin[TV_MARGIN_TOP] = (int) val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_bottom_margin_property &&
+ intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+ intel_tv->margin[TV_MARGIN_BOTTOM] = (int) val;
+ changed = true;
+ } else if (property == dev->mode_config.tv_mode_property) {
+ if (val >= ARRAY_SIZE(tv_modes)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
+ goto out;
+
+ intel_tv->tv_format = tv_modes[val].name;
+ changed = true;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (changed && crtc)
+ intel_crtc_restore_mode(crtc);
+out:
+ return ret;
+}
+
+static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
+ .mode_set = intel_tv_mode_set,
+};
+
+static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_tv_detect,
+ .destroy = intel_tv_destroy,
+ .set_property = intel_tv_set_property,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+ .mode_valid = intel_tv_mode_valid,
+ .get_modes = intel_tv_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+/*
+ * Enumerate the child device array parsed from the VBT to check
+ * whether the integrated TV is present: return 1 if it is present,
+ * 0 if it is not. If no child device was parsed from the VBT,
+ * assume the TV is present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
+ /*
+ * If the device type is not TV, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+ p_child->device_type != DEVICE_TYPE_TV)
+ continue;
+ /* Only when the addin_offset is non-zero, it is regarded
+ * as present.
+ */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+void
+intel_tv_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_tv *intel_tv;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ u32 tv_dac_on, tv_dac_off, save_tv_dac;
+ char *tv_format_names[ARRAY_SIZE(tv_modes)];
+ int i, initial_mode = 0;
+
+ if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ return;
+
+ if (!tv_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ return;
+ }
+ /* Even if we have an encoder we may not have a connector */
+ if (!dev_priv->vbt.int_tv_support)
+ return;
+
+ /*
+ * Sanity check the TV output by checking to see if the
+ * DAC register holds a value
+ */
+ save_tv_dac = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac);
+
+ /*
+ * If the register does not hold the state change enable bit
+ * (either as a 0 or a 1), assume it doesn't really exist.
+ */
+ if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
+ (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
+ return;
+
+ intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+ if (!intel_tv) {
+ return;
+ }
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_tv, sizeof(*intel_tv));
+ return;
+ }
+
+ intel_encoder = &intel_tv->base;
+ connector = &intel_connector->base;
+
+ /* The documentation, for the older chipsets at least, recommends
+ * using a polling method rather than hotplug detection for TVs.
+ * This is because in order to perform the hotplug detection, the
+ * PLLs for the TV must be kept alive, increasing power drain and
+ * starving bandwidth from other encoders. Notably, for instance,
+ * it causes pipe underruns on Crestline when this encoder is
+ * supposedly idle.
+ *
+ * More recent chipsets favour HDMI rather than integrated S-Video.
+ */
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ (void) drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+ DRM_MODE_CONNECTOR_SVIDEO);
+
+ (void) drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+ DRM_MODE_ENCODER_TVDAC);
+
+ intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->enable = intel_enable_tv;
+ intel_encoder->disable = intel_disable_tv;
+ intel_encoder->get_hw_state = intel_tv_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->type_size = sizeof(struct intel_tv);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->cloneable = false;
+ intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+
+ /* BIOS margin values */
+ intel_tv->margin[TV_MARGIN_LEFT] = 54;
+ intel_tv->margin[TV_MARGIN_TOP] = 36;
+ intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+ intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+
+ intel_tv->tv_format = tv_modes[initial_mode].name;
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
+ (void) drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* Create TV properties then attach current values */
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+ tv_format_names[i] = (char *)tv_modes[i].name;
+ (void) drm_mode_create_tv_properties(dev,
+ ARRAY_SIZE(tv_modes),
+ tv_format_names);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
+ initial_mode);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_left_margin_property,
+ intel_tv->margin[TV_MARGIN_LEFT]);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_top_margin_property,
+ intel_tv->margin[TV_MARGIN_TOP]);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_right_margin_property,
+ intel_tv->margin[TV_MARGIN_RIGHT]);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_bottom_margin_property,
+ intel_tv->margin[TV_MARGIN_BOTTOM]);
+}
diff --git a/usr/src/uts/intel/io/drm/r300_cmdbuf.c b/usr/src/uts/intel/io/radeon/r300_cmdbuf.c
index bc2eb7b..bc2eb7b 100644
--- a/usr/src/uts/intel/io/drm/r300_cmdbuf.c
+++ b/usr/src/uts/intel/io/radeon/r300_cmdbuf.c
diff --git a/usr/src/uts/intel/io/drm/r300_reg.h b/usr/src/uts/intel/io/radeon/r300_reg.h
index 8f30b80..8f30b80 100644
--- a/usr/src/uts/intel/io/drm/r300_reg.h
+++ b/usr/src/uts/intel/io/radeon/r300_reg.h
diff --git a/usr/src/uts/intel/io/drm/radeon_cp.c b/usr/src/uts/intel/io/radeon/radeon_cp.c
index 0261249..0261249 100644
--- a/usr/src/uts/intel/io/drm/radeon_cp.c
+++ b/usr/src/uts/intel/io/radeon/radeon_cp.c
diff --git a/usr/src/uts/intel/io/drm/radeon_drm.h b/usr/src/uts/intel/io/radeon/radeon_drm.h
index b68eff3..b68eff3 100644
--- a/usr/src/uts/intel/io/drm/radeon_drm.h
+++ b/usr/src/uts/intel/io/radeon/radeon_drm.h
diff --git a/usr/src/uts/intel/io/drm/radeon_drv.c b/usr/src/uts/intel/io/radeon/radeon_drv.c
index 8bc827c..8bc827c 100644
--- a/usr/src/uts/intel/io/drm/radeon_drv.c
+++ b/usr/src/uts/intel/io/radeon/radeon_drv.c
diff --git a/usr/src/uts/intel/io/drm/radeon_drv.h b/usr/src/uts/intel/io/radeon/radeon_drv.h
index 0a0b046..0a0b046 100644
--- a/usr/src/uts/intel/io/drm/radeon_drv.h
+++ b/usr/src/uts/intel/io/radeon/radeon_drv.h
diff --git a/usr/src/uts/intel/io/drm/radeon_io32.h b/usr/src/uts/intel/io/radeon/radeon_io32.h
index 2febb5f..2febb5f 100644
--- a/usr/src/uts/intel/io/drm/radeon_io32.h
+++ b/usr/src/uts/intel/io/radeon/radeon_io32.h
diff --git a/usr/src/uts/intel/io/drm/radeon_irq.c b/usr/src/uts/intel/io/radeon/radeon_irq.c
index 0c7e2e8..0c7e2e8 100644
--- a/usr/src/uts/intel/io/drm/radeon_irq.c
+++ b/usr/src/uts/intel/io/radeon/radeon_irq.c
diff --git a/usr/src/uts/intel/io/drm/radeon_mem.c b/usr/src/uts/intel/io/radeon/radeon_mem.c
index b2ddba5..b2ddba5 100644
--- a/usr/src/uts/intel/io/drm/radeon_mem.c
+++ b/usr/src/uts/intel/io/radeon/radeon_mem.c
diff --git a/usr/src/uts/intel/io/drm/radeon_state.c b/usr/src/uts/intel/io/radeon/radeon_state.c
index 9323215..9323215 100644
--- a/usr/src/uts/intel/io/drm/radeon_state.c
+++ b/usr/src/uts/intel/io/radeon/radeon_state.c