Diffstat (limited to 'usr/src/uts/common')
-rw-r--r--  usr/src/uts/common/drm/drm.h | 928
-rw-r--r--  usr/src/uts/common/drm/drmP.h | 1606
-rw-r--r--  usr/src/uts/common/drm/drm_atomic.h (renamed from usr/src/uts/common/io/drm/drm_atomic.h) | 18
-rw-r--r--  usr/src/uts/common/drm/drm_core.h | 49
-rw-r--r--  usr/src/uts/common/drm/drm_crtc.h | 1068
-rw-r--r--  usr/src/uts/common/drm/drm_crtc_helper.h | 176
-rw-r--r--  usr/src/uts/common/drm/drm_dp_helper.h | 368
-rw-r--r--  usr/src/uts/common/drm/drm_edid.h | 295
-rw-r--r--  usr/src/uts/common/drm/drm_fb_helper.h | 121
-rw-r--r--  usr/src/uts/common/drm/drm_fourcc.h | 229
-rw-r--r--  usr/src/uts/common/drm/drm_io32.h (renamed from usr/src/uts/common/io/drm/drm_io32.h) | 74
-rw-r--r--  usr/src/uts/common/drm/drm_linux.h | 210
-rw-r--r--  usr/src/uts/common/drm/drm_linux_list.h | 165
-rw-r--r--  usr/src/uts/common/drm/drm_mm.h | 242
-rw-r--r--  usr/src/uts/common/drm/drm_mode.h | 559
-rw-r--r--  usr/src/uts/common/drm/drm_os_solaris.h | 119
-rw-r--r--  usr/src/uts/common/drm/drm_pciids.h | 250
-rw-r--r--  usr/src/uts/common/drm/drm_rect.h | 167
-rw-r--r--  usr/src/uts/common/drm/drm_sarea.h (renamed from usr/src/uts/common/io/drm/drm_sarea.h) | 45
-rw-r--r--  usr/src/uts/common/drm/drm_sun_i2c.h | 75
-rw-r--r--  usr/src/uts/common/drm/drm_sun_idr.h | 90
-rw-r--r--  usr/src/uts/common/drm/drm_sun_pci.h | 84
-rw-r--r--  usr/src/uts/common/drm/drm_sun_timer.h | 56
-rw-r--r--  usr/src/uts/common/drm/drm_sun_workqueue.h | 53
-rw-r--r--  usr/src/uts/common/drm/drm_sunmod.h | 118
-rw-r--r--  usr/src/uts/common/drm/i915_drm.h | 1033
-rw-r--r--  usr/src/uts/common/io/drm/LICENSE_DRM | 1349
-rw-r--r--  usr/src/uts/common/io/drm/LICENSE_DRM.descrip (renamed from usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip) | 0
-rw-r--r--  usr/src/uts/common/io/drm/THIRDPARTYLICENSE | 314
-rw-r--r--  usr/src/uts/common/io/drm/ati_pcigart.c | 38
-rw-r--r--  usr/src/uts/common/io/drm/drm.h | 865
-rw-r--r--  usr/src/uts/common/io/drm/drmP.h | 1103
-rw-r--r--  usr/src/uts/common/io/drm/drm_agpsupport.c | 796
-rw-r--r--  usr/src/uts/common/io/drm/drm_auth.c | 236
-rw-r--r--  usr/src/uts/common/io/drm/drm_bufs.c | 1462
-rw-r--r--  usr/src/uts/common/io/drm/drm_cache.c | 11
-rw-r--r--  usr/src/uts/common/io/drm/drm_context.c | 660
-rw-r--r--  usr/src/uts/common/io/drm/drm_crtc.c | 3944
-rw-r--r--  usr/src/uts/common/io/drm/drm_crtc_helper.c | 1084
-rw-r--r--  usr/src/uts/common/io/drm/drm_dma.c | 166
-rw-r--r--  usr/src/uts/common/io/drm/drm_dp_helper.c | 147
-rw-r--r--  usr/src/uts/common/io/drm/drm_dp_i2c_helper.c | 207
-rw-r--r--  usr/src/uts/common/io/drm/drm_drawable.c | 74
-rw-r--r--  usr/src/uts/common/io/drm/drm_drv.c | 1013
-rw-r--r--  usr/src/uts/common/io/drm/drm_edid.c | 2995
-rw-r--r--  usr/src/uts/common/io/drm/drm_fb_helper.c | 958
-rw-r--r--  usr/src/uts/common/io/drm/drm_fops.c | 522
-rw-r--r--  usr/src/uts/common/io/drm/drm_gem.c | 875
-rw-r--r--  usr/src/uts/common/io/drm/drm_io32.c | 386
-rw-r--r--  usr/src/uts/common/io/drm/drm_ioctl.c | 581
-rw-r--r--  usr/src/uts/common/io/drm/drm_irq.c | 1634
-rw-r--r--  usr/src/uts/common/io/drm/drm_kstat.c | 46
-rw-r--r--  usr/src/uts/common/io/drm/drm_linux.c | 72
-rw-r--r--  usr/src/uts/common/io/drm/drm_linux_list.h | 99
-rw-r--r--  usr/src/uts/common/io/drm/drm_lock.c | 332
-rw-r--r--  usr/src/uts/common/io/drm/drm_memory.c | 222
-rw-r--r--  usr/src/uts/common/io/drm/drm_mm.c | 887
-rw-r--r--  usr/src/uts/common/io/drm/drm_modes.c | 1140
-rw-r--r--  usr/src/uts/common/io/drm/drm_msg.c | 51
-rw-r--r--  usr/src/uts/common/io/drm/drm_pci.c | 180
-rw-r--r--  usr/src/uts/common/io/drm/drm_rect.c | 285
-rw-r--r--  usr/src/uts/common/io/drm/drm_scatter.c | 169
-rw-r--r--  usr/src/uts/common/io/drm/drm_stub.c | 3
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_i2c.c | 496
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_idr.c | 488
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_pci.c | 219
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_timer.c | 90
-rw-r--r--  usr/src/uts/common/io/drm/drm_sun_workqueue.c | 93
-rw-r--r--  usr/src/uts/common/io/drm/drm_sunmod.c | 1372
-rw-r--r--  usr/src/uts/common/io/drm/drm_sunmod.h | 160
-rw-r--r--  usr/src/uts/common/io/drm/drm_sysfs.c | 141
-rw-r--r--  usr/src/uts/common/io/drm/queue.h | 585
72 files changed, 28977 insertions, 7771 deletions
diff --git a/usr/src/uts/common/drm/drm.h b/usr/src/uts/common/drm/drm.h
new file mode 100644
index 0000000..1e7c192
--- /dev/null
+++ b/usr/src/uts/common/drm/drm.h
@@ -0,0 +1,928 @@
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#if defined(__linux__)
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
+
+#else /* One of the BSDs or Solaris */
+
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+
+#if defined(__SOLARIS__) || defined(__sun)
+#include <sys/types32.h>
+typedef unsigned long long drm_handle_t;
+
+#else /* !__SOLARIS__ */
+typedef unsigned long drm_handle_t;
+
+#endif /* __SOLARIS__ || __sun */
+
+
+#endif /* __linux__ */
+/* Solaris-specific. */
+#if defined(__SOLARIS__) || defined(__sun)
+
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 13
+#define _IOC_DIRBITS 3
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT + _IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT + _IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT + _IOC_SIZEBITS)
+
+#define _IOC(dir, type, nr, size) \
+ (((dir) /* already shifted */) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+#define _IOC_TYPE(req) ((req >> _IOC_TYPESHIFT) & ((1 << _IOC_TYPEBITS) -1))
+
+#endif /* __SOLARIS__ || __sun */
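+
+/*
+ * Worked example: with the bit layout above an encoded request packs its
+ * fields as
+ *
+ *   31..29 dir | 28..16 size | 15..8 type | 7..0 nr
+ *
+ * so for a hypothetical request req = _IOC(0, 'd', 0x23, 8), the low byte
+ * holds the command number 0x23 and _IOC_TYPE(req) recovers the type
+ * byte 'd' (0x64).
+ */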
+
+#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
+
+typedef unsigned int drm_context_t;
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;
+
+/**
+ * Cliprect.
+ *
+ * \warning: If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+};
+
+/**
+ * Drawable information.
+ */
+struct drm_drawable_info {
+ unsigned int num_rects;
+ struct drm_clip_rect *rects;
+};
+
+/**
+ * Texture region.
+ */
+struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+};
+
+/**
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer. To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+struct drm_hw_lock {
+ __volatile__ unsigned int lock; /**< lock variable */
+ char padding[60]; /**< Pad to cache line */
+};
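+
+/*
+ * Worked example of the lock-flag macros above, for a hypothetical lock
+ * word lock = _DRM_LOCK_HELD | 0x5 (context 0x5 holds the lock):
+ *
+ *   _DRM_LOCK_IS_HELD(lock)    -> nonzero, the lock is held
+ *   _DRM_LOCK_IS_CONT(lock)    -> 0, no contention recorded
+ *   _DRM_LOCKING_CONTEXT(lock) -> 0x5, the holder's context
+ */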
+
+/**
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+struct drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ size_t name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ size_t date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ size_t desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+};
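+
+/*
+ * Minimal user-space sketch (an assumption based on the length/buffer
+ * pairs above, not something this header defines): the ioctl is usually
+ * issued twice, once to learn the string lengths and once to fill
+ * caller-allocated buffers.  Error handling omitted.
+ *
+ *   struct drm_version v = { 0 };
+ *   (void) ioctl(fd, DRM_IOCTL_VERSION, &v);    // lengths only
+ *   v.name = malloc(v.name_len + 1);
+ *   v.date = malloc(v.date_len + 1);
+ *   v.desc = malloc(v.desc_len + 1);
+ *   (void) ioctl(fd, DRM_IOCTL_VERSION, &v);    // strings filled in
+ */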
+
+/**
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+struct drm_unique {
+ size_t unique_len; /**< Length of unique */
+ char *unique; /**< Unique name for driver instantiation */
+};
+
+struct drm_list {
+ int count; /**< Length of user-space structures */
+ struct drm_version *version;
+};
+
+struct drm_block {
+ int unused;
+};
+
+/**
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+};
+
+/**
+ * Type of memory to map.
+ */
+enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /**< no caching, no core dump */
+ _DRM_SHM = 2, /**< shared, cached */
+ _DRM_AGP = 3, /**< AGP/GART */
+ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
+ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
+ _DRM_GEM = 6 /**< GEM object */
+};
+
+/**
+ * Memory mapping flags.
+ */
+enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /**< shared, cached, locked */
+ _DRM_KERNEL = 0x08, /**< kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
+};
+
+struct drm_ctx_priv_map {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ void *handle; /**< Handle of map */
+};
+
+/**
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+struct drm_map {
+ unsigned long long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ unsigned long long handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+ /* Private data */
+};
+
+/**
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+struct drm_client {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ unsigned long pid; /**< Process ID */
+ unsigned long uid; /**< User ID */
+ unsigned long magic; /**< Magic */
+ unsigned long iocs; /**< Ioctl count */
+};
+
+enum drm_stat_type {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /**< Generic value */
+ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
+ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /**< IRQ */
+ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
+ _DRM_STAT_DMA, /**< DMA */
+ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /**< Missed DMA opportunity */
+ /* Add to the *END* of the list */
+};
+
+/**
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ enum drm_stat_type type;
+ } data[15];
+};
+
+/**
+ * Hardware locking flags.
+ */
+enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+};
+
+/**
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+struct drm_lock {
+ int context;
+ enum drm_lock_flags flags;
+};
+
+/**
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+enum drm_dma_flags {
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note The buffer may not yet have
+ * been processed by the hardware --
+ * getting a hardware lock with the
+ * hardware quiescent will ensure
+ * that the buffer has been
+ * processed.
+ */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+};
+
+/**
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+
+typedef enum {
+ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
+ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
+ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+} drm_buf_flag;
+
+
+struct drm_buf_desc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ drm_buf_flag flags;
+ unsigned long agp_start; /**<
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+};
+
+/**
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+struct drm_buf_info {
+ int count; /**< Entries in list */
+ struct drm_buf_desc *list;
+};
+
+/**
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+struct drm_buf_free {
+ int count;
+ int *list;
+};
+
+/**
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+struct drm_buf_pub {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ void *address; /**< Address of buffer */
+};
+
+/**
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+struct drm_buf_map {
+ int count; /**< Length of the buffer list */
+#ifdef __cplusplus
+ void *virt;
+#else
+ void *virtual; /**< Mmap'd area in user-virtual */
+#endif
+ struct drm_buf_pub *list; /**< Buffer information */
+ int fd;
+};
+
+/**
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+struct drm_dma {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int *send_indices; /**< List of handles to buffers */
+ int *send_sizes; /**< Lengths of data to send */
+ enum drm_dma_flags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ int *request_indices; /**< Buffer information */
+ int *request_sizes;
+ int granted_count; /**< Number of buffers granted */
+};
+
+enum drm_ctx_flags {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+};
+
+/**
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+struct drm_ctx {
+ drm_context_t handle;
+ enum drm_ctx_flags flags;
+};
+
+/**
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+struct drm_ctx_res {
+ int count;
+ struct drm_ctx *contexts;
+};
+
+/**
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+struct drm_draw {
+ drm_drawable_t handle;
+};
+
+/**
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+ DRM_DRAWABLE_CLIPRECTS
+} drm_drawable_info_type_t;
+
+struct drm_update_draw {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+};
+
+/**
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+struct drm_auth {
+ drm_magic_t magic;
+};
+
+/**
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+struct drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+};
+
+enum drm_vblank_seq_type {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
+};
+#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
+#define _DRM_VBLANK_HIGH_CRTC_MASK 0x0000003e
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
+
+struct drm_wait_vblank_request {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+#if defined(__sun)
+ time_t tval_sec;
+ suseconds_t tval_usec;
+#else
+ long tval_sec;
+ long tval_usec;
+#endif
+};
+
+/**
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+};
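+
+/*
+ * Usage sketch (illustrative, error handling omitted): block until the
+ * next vblank and read the result from the reply half of the union.
+ *
+ *   union drm_wait_vblank vbl;
+ *   vbl.request.type = _DRM_VBLANK_RELATIVE;
+ *   vbl.request.sequence = 1;
+ *   (void) ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
+ *   // vbl.reply.sequence and vbl.reply.tval_sec/tval_usec now describe
+ *   // the vblank that ended the wait.
+ */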
+
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+ __u32 crtc;
+ __u32 cmd;
+};
+
+/**
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+struct drm_agp_mode {
+ unsigned long mode; /**< AGP mode */
+};
+
+/**
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+struct drm_agp_buffer {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for binding / unbinding */
+ unsigned long type; /**< Type of memory to allocate */
+ unsigned long physical; /**< Physical used by i810 */
+};
+
+/**
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+struct drm_agp_binding {
+ unsigned long handle; /**< From drm_agp_buffer */
+ unsigned long offset; /**< In bytes -- will round to page boundary */
+};
+
+/**
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /* physical address */
+ unsigned long aperture_size; /* bytes */
+ unsigned long memory_allowed; /* bytes */
+ unsigned long memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+};
+
+/**
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+struct drm_scatter_gather {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for mapping / unmapping */
+};
+
+/**
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+};
+
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ __u32 handle;
+ __u32 pad;
+};
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ __u32 handle;
+
+ /** Returned global name */
+ __u32 name;
+};
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct drm_gem_open {
+ /** Name of object being opened */
+ __u32 name;
+
+ /** Returned handle for the object */
+ __u32 handle;
+
+ /** Returned size of the object */
+ __u64 size;
+};
+
+/*
+ * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
+ * combination for the hardware cursor. The intention is that a hardware
+ * agnostic userspace can query a cursor plane size to use.
+ *
+ * Note that the cross-driver contract is to merely return a valid size;
+ * drivers are free to attach another meaning on top, e.g. i915 returns the
+ * maximum plane size.
+ */
+#define DRM_CAP_CURSOR_WIDTH 0x8
+#define DRM_CAP_CURSOR_HEIGHT 0x9
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+
+/** DRM_IOCTL_GET_CAP ioctl argument type */
+struct drm_get_cap {
+ __u64 capability;
+ __u64 value;
+};
+
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D 1
+
+/**
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES
+ *
+ * if set to 1, the DRM core will expose the full universal plane list
+ * (including primary and cursor planes).
+ */
+#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
+
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will allow atomic modesetting requests.
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+ __u64 capability;
+ __u64 value;
+};
+
+#define DRM_CLOEXEC O_CLOEXEC
+struct drm_prime_handle {
+ __u32 handle;
+
+ /** Flags.. only applicable for handle->fd */
+ __u32 flags;
+
+ /** Returned dmabuf file descriptor */
+ __s32 fd;
+};
+
+#include "drm_mode.h"
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE, (nr))
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE, (nr), type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE, (nr), type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE, (nr), type)
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
+#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+
+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
+#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
+#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
+
+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
+#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
+#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
+#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
+#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
+#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
+#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
+#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
+#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
+#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
+#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
+
+/**
+ * Device specific ioctls should only be in their respective headers
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
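+
+/*
+ * Sketch of how a driver header is expected to use this window (the
+ * DRM_MYDRV_* names are hypothetical, not defined anywhere):
+ *
+ *   #define DRM_MYDRV_GETPARAM       0x00
+ *   #define DRM_IOCTL_MYDRV_GETPARAM \
+ *       DRM_IOWR(DRM_COMMAND_BASE + DRM_MYDRV_GETPARAM, \
+ *           struct drm_mydrv_getparam)
+ *
+ * Driver command offsets 0x00..0x59 therefore land in the 0x40..0x99
+ * range reserved above.
+ */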
+
+/**
+ * Header for events written back to userspace on the drm fd. The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event. A read on the drm fd will always only return complete
+ * events, that is, if for example the read buffer is 100 bytes, and
+ * there are two 64 byte events pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+ __u32 type;
+ __u32 length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+ struct drm_event base;
+ __u64 user_data;
+#if defined(__sun)
+ time_t tv_sec;
+ suseconds_t tv_usec;
+#else
+ __u32 tv_sec;
+ __u32 tv_usec;
+#endif
+ __u32 sequence;
+ __u32 reserved;
+};
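+
+/*
+ * Read-loop sketch (user space, illustrative): since a read only ever
+ * returns whole events, the buffer can be walked header by header using
+ * the self-describing length field.  handle_vblank() is a hypothetical
+ * consumer.
+ *
+ *   char buf[1024];
+ *   ssize_t n = read(fd, buf, sizeof (buf));
+ *   for (char *p = buf; p < buf + n; ) {
+ *       struct drm_event *e = (struct drm_event *)p;
+ *       if (e->type == DRM_EVENT_VBLANK)
+ *           handle_vblank((struct drm_event_vblank *)e);
+ *       p += e->length;
+ *   }
+ */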
+
+#define DRM_CAP_DUMB_BUFFER 0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_PRIME 0x5
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+
+#define DRM_PRIME_CAP_IMPORT 0x1
+#define DRM_PRIME_CAP_EXPORT 0x2
+
+/* typedef area */
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+
+#endif /* _DRM_H_ */
diff --git a/usr/src/uts/common/drm/drmP.h b/usr/src/uts/common/drm/drmP.h
new file mode 100644
index 0000000..cc32ddc
--- /dev/null
+++ b/usr/src/uts/common/drm/drmP.h
@@ -0,0 +1,1606 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
+ * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+#ifndef _DRMP_H
+#define _DRMP_H
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/cmn_err.h>
+#include <sys/varargs.h>
+#include <sys/pci.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/agpgart.h>
+#include <sys/time.h>
+#include <sys/gfx_private.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/ddi.h>
+#include <drm/drm.h>
+#include <drm/drm_os_solaris.h>
+#include "drm_atomic.h"
+#include <sys/queue.h>
+#include "drm_linux.h"
+#include "drm_linux_list.h"
+#include "drm_mm.h"
+#include "drm_mode.h"
+
+#include "drm_sun_idr.h"
+#include "drm_sun_timer.h"
+#include "drm_sun_workqueue.h"
+#include "drm_sun_pci.h"
+
+#ifndef __inline__
+#define __inline__ inline
+#endif
+
+#if !defined(__FUNCTION__)
+#if defined(_STDC_C99) || defined(__C99FEATURES__)
+#define __FUNCTION__ __func__
+#else
+#define __FUNCTION__ " "
+#endif
+#endif
+
+#define KB(x) ((x) * 1024)
+#define MB(x) (KB (KB (x)))
+#define GB(x) (MB (KB (x)))
+
+#define DRM_MINOR_NBITS (9)
+#define DRM_MINOR_SHIFT (0)
+#define DRM_MINOR_MAX ((1 << DRM_MINOR_NBITS) - 1)
+#define DRM_DEV2MINOR(dev) ((getminor(dev) >> DRM_MINOR_SHIFT) & DRM_MINOR_MAX)
+
+#define DRM_CLONEID_NBITS (NBITSMINOR - DRM_MINOR_NBITS)
+#define DRM_CLONEID_SHIFT (DRM_MINOR_NBITS)
+#define DRM_CLONEID_MAX ((1 << DRM_CLONEID_NBITS) - 1)
+#define DRM_DEV2CLONEID(dev) ((getminor(dev) >> DRM_CLONEID_SHIFT) & DRM_CLONEID_MAX)
+
+#define DRM_MAKEDEV(major_id, minor_id, clone_id) \
+ makedevice(major_id, (minor_id << DRM_MINOR_SHIFT) | (clone_id << DRM_CLONEID_SHIFT))
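+
+/*
+ * Worked example: with DRM_MINOR_NBITS = 9 the low nine bits of the minor
+ * number carry the drm minor id and the bits above them the clone id, so
+ * for a hypothetical dev_t d = DRM_MAKEDEV(major_id, 5, 3) (major_id being
+ * whatever major the driver registered), DRM_DEV2MINOR(d) == 5 and
+ * DRM_DEV2CLONEID(d) == 3.
+ */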
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_REQUIRE_AGP 0x2
+#define DRIVER_USE_MTRR 0x4
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_IRQ_VBL 0x100
+#define DRIVER_DMA_QUEUE 0x200
+#define DRIVER_FB_DMA 0x400
+#define DRIVER_IRQ_VBL2 0x800
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
+#define DRIVER_USE_PLATFORM_DEVICE 0x8000
+
+/* DRM space units */
+#define DRM_PAGE_SHIFT PAGESHIFT
+#define DRM_PAGE_SIZE (1 << DRM_PAGE_SHIFT)
+#define DRM_PAGE_OFFSET (DRM_PAGE_SIZE - 1)
+#define DRM_PAGE_MASK ~(DRM_PAGE_SIZE - 1)
+#define DRM_MB2PAGES(x) ((x) << 8)
+#define DRM_PAGES2BYTES(x) ((x) << DRM_PAGE_SHIFT)
+#define DRM_BYTES2PAGES(x) ((x) >> DRM_PAGE_SHIFT)
+#define DRM_PAGES2KB(x) ((x) << 2)
+#define DRM_ALIGNED(offset) (((offset) & DRM_PAGE_OFFSET) == 0)
+
+#define PAGE_SHIFT DRM_PAGE_SHIFT
+#define PAGE_SIZE DRM_PAGE_SIZE
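+
+/*
+ * Worked example (assuming the usual PAGESHIFT of 12, i.e. 4KB pages):
+ * DRM_MB2PAGES(16) == 16 << 8 == 4096 pages, DRM_PAGES2BYTES(4096) ==
+ * 4096 << 12 == 16MB, and DRM_PAGES2KB(4096) == 16384KB.
+ */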
+
+#define DRM_MAX_INSTANCES 8
+#define DRM_DEVNODE "drm"
+#define DRM_UNOPENED 0
+#define DRM_OPENED 1
+
+#define DRM_HASH_SIZE 16 /* Size of key hash table */
+#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
+
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_BUFS 6
+#define DRM_MEM_SEGS 7
+#define DRM_MEM_PAGES 8
+#define DRM_MEM_FILES 9
+#define DRM_MEM_QUEUES 10
+#define DRM_MEM_CMDS 11
+#define DRM_MEM_MAPPINGS 12
+#define DRM_MEM_BUFLISTS 13
+#define DRM_MEM_DRMLISTS 14
+#define DRM_MEM_TOTALDRM 15
+#define DRM_MEM_BOUNDDRM 16
+#define DRM_MEM_CTXBITMAP 17
+#define DRM_MEM_STUB 18
+#define DRM_MEM_SGLISTS 19
+#define DRM_MEM_AGPLISTS 20
+#define DRM_MEM_CTXLIST 21
+#define DRM_MEM_MM 22
+#define DRM_MEM_HASHTAB 23
+#define DRM_MEM_OBJECTS 24
+
+#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+#define DRM_MAP_HASH_OFFSET 0x10000000
+#define DRM_MAP_HASH_ORDER 12
+#define DRM_OBJECT_HASH_ORDER 12
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#define DRM_MM_INIT_MAX_PAGES 256
+
+
+/* Internal types and structures */
+#define DRM_ARRAY_SIZE(x) (sizeof (x) / sizeof (x[0]))
+#define DRM_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define DRM_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+
+#define __OS_HAS_AGP 1
+
+#define DRM_DEV_MOD (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+
+#define DRM_CURRENTPID ddi_get_pid()
+#define DRM_SPINLOCK(l) mutex_enter(l)
+#define DRM_SPINUNLOCK(u) mutex_exit(u)
+#define DRM_UDELAY(d) udelay(d)
+#define DRM_MEMORYBARRIER()
+
+#define DRM_MINOR_UNASSIGNED 0
+#define DRM_MINOR_LEGACY 1
+#define DRM_MINOR_CONTROL 2
+#define DRM_MINOR_RENDER 3
+#define DRM_MINOR_VGATEXT 4
+#define DRM_MINOR_AGPMASTER 5
+
+#define DRM_MINOR_ID_BASE_LEGACY 0
+#define DRM_MINOR_ID_BASE_CONTROL 64
+#define DRM_MINOR_ID_BASE_RENDER 128
+#define DRM_MINOR_ID_BASE_VGATEXT 384
+#define DRM_MINOR_ID_BASE_AGPMASTER 448
+
+#define DRM_MINOR_ID_LIMIT_LEGACY 63
+#define DRM_MINOR_ID_LIMIT_CONTROL 127
+#define DRM_MINOR_ID_LIMIT_RENDER 383
+#define DRM_MINOR_ID_LIMIT_VGATEXT 447
+#define DRM_MINOR_ID_LIMIT_AGPMASTER 511
+
+#define DRM_MINOR_IS_LEGACY(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_LEGACY && minor_id < DRM_MINOR_ID_LIMIT_LEGACY)
+#define DRM_MINOR_IS_CONTROL(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_CONTROL && minor_id < DRM_MINOR_ID_LIMIT_CONTROL)
+#define DRM_MINOR_IS_RENDER(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_RENDER && minor_id < DRM_MINOR_ID_LIMIT_RENDER)
+#define DRM_MINOR_IS_VGATEXT(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_VGATEXT && minor_id < DRM_MINOR_ID_LIMIT_VGATEXT)
+#define DRM_MINOR_IS_AGPMASTER(minor_id) \
+ (minor_id >= DRM_MINOR_ID_BASE_AGPMASTER && minor_id < DRM_MINOR_ID_LIMIT_AGPMASTER)
+
+/* Legacy VGA regions */
+#define VGA_RSRC_NONE 0x00
+#define VGA_RSRC_LEGACY_IO 0x01
+#define VGA_RSRC_LEGACY_MEM 0x02
+#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
+/* Non-legacy access */
+#define VGA_RSRC_NORMAL_IO 0x04
+#define VGA_RSRC_NORMAL_MEM 0x08
+
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+
+typedef struct drm_file drm_file_t;
+typedef struct drm_device drm_device_t;
+typedef struct drm_driver drm_driver_t;
+
+#define DRM_COPYFROM_WITH_RETURN(dest, src, size) \
+ if (ddi_copyin((src), (dest), (size), 0)) { \
+ DRM_ERROR("copy from user failed"); \
+ return (-EFAULT); \
+ }
+
+#define DRM_COPYTO_WITH_RETURN(dest, src, size) \
+ if (ddi_copyout((src), (dest), (size), 0)) { \
+ DRM_ERROR("copy to user failed"); \
+ return (-EFAULT); \
+ }
+
+#define DRM_COPY_FROM_USER(dest, src, size) \
+ ddi_copyin((src), (dest), (size), 0) /* flag for src */
+
+#define DRM_COPY_TO_USER(dest, src, size) \
+ ddi_copyout((src), (dest), (size), 0) /* flags for dest */
+
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+ ddi_copyin((arg2), (arg1), (arg3), 0)
+
+#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
+ ddi_copyout((arg2), arg1, arg3, 0)
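+
+/*
+ * Usage sketch: because the *_WITH_RETURN forms expand to a
+ * return (-EFAULT) on a failed copy, they may only appear in a function
+ * returning int, typically an ioctl handler.  The argument struct, the
+ * user-space pointer uarg and the helper below are hypothetical.
+ *
+ *   struct drm_mydrv_getparam p;
+ *
+ *   DRM_COPYFROM_WITH_RETURN(&p, (void *)uarg, sizeof (p));
+ *   p.value = compute_value(dev);        // hypothetical helper
+ *   DRM_COPYTO_WITH_RETURN((void *)uarg, &p, sizeof (p));
+ *   return (0);
+ */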
+
+#define DRM_READ8(map, offset) \
+ *(volatile uint8_t *)((uintptr_t)((map)->handle) + (offset))
+#define DRM_READ16(map, offset) \
+ *(volatile uint16_t *)((uintptr_t)((map)->handle) + (offset))
+#define DRM_READ32(map, offset) \
+ *(volatile uint32_t *)((uintptr_t)((map)->handle) + (offset))
+#define DRM_READ64(map, offset) \
+ *(volatile uint64_t *)((uintptr_t)((map)->handle) + (offset))
+#define DRM_WRITE8(map, offset, val) \
+ *(volatile uint8_t *)((uintptr_t)((map)->handle) + (offset)) = (val)
+#define DRM_WRITE16(map, offset, val) \
+ *(volatile uint16_t *)((uintptr_t)((map)->handle) + (offset)) = (val)
+#define DRM_WRITE32(map, offset, val) \
+ *(volatile uint32_t *)((uintptr_t)((map)->handle) + (offset)) = (val)
+#define DRM_WRITE64(map, offset, val) \
+ *(volatile uint64_t *)((uintptr_t)((map)->handle) + (offset)) = (val)
+
+typedef struct drm_wait_queue {
+ kcondvar_t cv;
+ kmutex_t lock;
+} wait_queue_head_t;
+
+#define DRM_INIT_WAITQUEUE(q, pri) \
+{ \
+ mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri); \
+ cv_init(&(q)->cv, NULL, CV_DRIVER, NULL); \
+}
+
+#define DRM_FINI_WAITQUEUE(q) \
+{ \
+ mutex_destroy(&(q)->lock); \
+ cv_destroy(&(q)->cv); \
+}
+
+#define DRM_WAKEUP(q) \
+{ \
+ mutex_enter(&(q)->lock); \
+ cv_broadcast(&(q)->cv); \
+ mutex_exit(&(q)->lock); \
+}
+
+#define DRM_WAIT_ON(ret, q, timeout, condition) \
+ mutex_enter(&(q)->lock); \
+ while (!(condition)) { \
+ ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
+ TR_CLOCK_TICK); \
+ if (ret == -1) { \
+ ret = EBUSY; \
+ break; \
+ } else if (ret == 0) { \
+ ret = EINTR; \
+ break; \
+ } else { \
+ ret = 0; \
+ } \
+ } \
+ mutex_exit(&(q)->lock);
+
+#define DRM_WAIT(ret, q, condition) \
+mutex_enter(&(q)->lock); \
+if (!(condition)) { \
+ (void) cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 3 * DRM_HZ); \
+ ret = 0; \
+ if (!(condition)) { \
+ /* make sure we got what we want */ \
+ if (wait_for(condition, 3000)) \
+ ret = EBUSY; \
+ } \
+} \
+mutex_exit(&(q)->lock);
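+
+/*
+ * Usage sketch for the wait-queue macros (all names other than the macros
+ * and wait_queue_head_t are hypothetical): the timeout passed to
+ * DRM_WAIT_ON is in clock ticks.
+ *
+ *   wait_queue_head_t wq;
+ *   DRM_INIT_WAITQUEUE(&wq, intr_pri);   // driver's interrupt priority
+ *
+ *   // waiter: sleep until `done' is set, at most `timeout' ticks
+ *   int ret;
+ *   DRM_WAIT_ON(ret, &wq, timeout, (done != 0));
+ *
+ *   // completion path (e.g. interrupt handler): set and wake everyone
+ *   done = 1;
+ *   DRM_WAKEUP(&wq);
+ */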
+
+/*
+#define DRM_GETSAREA() \
+{ \
+ drm_local_map_t *map; \
+ TAILQ_FOREACH(map, &dev->maplist, link) { \
+ if (map->type == _DRM_SHM && \
+ map->flags & _DRM_CONTAINS_LOCK) { \
+ dev_priv->sarea = map; \
+ break; \
+ } \
+ } \
+}
+*/
+
+#define wake_up_all DRM_WAKEUP
+
+/**
+ * Test that the hardware lock is held by the caller, returning otherwise.
+ *
+ * \param dev DRM device.
+ * \param filp file pointer of the caller.
+ */
+#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
+do { \
+ if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
+ _file_priv->master->lock.file_priv != _file_priv) { \
+ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+ __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
+ (void *)_file_priv->master->lock.file_priv, (void *)_file_priv); \
+ return -EINVAL; \
+ } \
+} while (*"\0")
+
+/**
+ * Copy and IOCTL return string to user space
+ */
+#define DRM_COPY(name, value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ if (DRM_COPY_TO_USER(name, value, len)) \
+ return (EFAULT); \
+ }
+
+#define DRM_IRQ_ARGS caddr_t arg
+#define IRQ_HANDLED DDI_INTR_CLAIMED
+#define IRQ_NONE DDI_INTR_UNCLAIMED
+
+enum {
+ DRM_IS_NOT_AGP,
+ DRM_IS_AGP,
+ DRM_MIGHT_BE_AGP
+};
+
+/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
+#ifndef PCIY_AGP
+#define PCIY_AGP 0x02
+#endif
+
+#ifndef PCIY_EXPRESS
+#define PCIY_EXPRESS 0x10
+#endif
+
+#define PAGE_ALIGN(addr) (((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
+#define DRM_SUSER(p) (crgetsgid(p) == 0 || crgetsuid(p) == 0)
+
+/*
+ * wait for 1 second
+ */
+//#define DRM_HZ drv_usectohz(400000)
+#define DRM_HZ drv_usectohz(1000000)
+
+#define DRM_SUPPORT 1
+#define DRM_UNSUPPORT 0
+
+#define __OS_HAS_AGP 1
+
+#ifndef offsetof
+#define __offsetof(type, field) ((size_t)(&((type *)0)->field))
+#define offsetof(type, field) __offsetof(type, field)
+#endif
+
+typedef struct drm_pci_id_list {
+ int vendor;
+ int device;
+ unsigned long driver_data;
+} drm_pci_id_list_t;
+
+#define DRM_DEVICE drm_device_t *dev = dev1
+#define DRM_IOCTL_ARGS \
+ dev_t dev_id, struct drm_device *dev, void *data, struct drm_file *file, int ioctl_mode, cred_t *credp
+
+typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
+
+#define DRM_AUTH 0x1
+#define DRM_MASTER 0x2
+#define DRM_ROOT_ONLY 0x4
+#define DRM_CONTROL_ALLOW 0x8
+#define DRM_UNLOCKED 0x10
+
+typedef struct drm_ioctl_desc {
+ drm_ioctl_t *func;
+ int flags;
+ int cmd;
+ unsigned int cmd_drv;
+ const char *name;
+ int (*copyin32)(void *dest, void *src);
+ int (*copyout32)(void *dest, void *src);
+} drm_ioctl_desc_t;
+
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#ifdef _MULTI_DATAMODEL
+#define DRM_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = _copyin32, .copyout32 = _copyout32}
+#else
+#define DRM_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL}
+#endif
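+
+/*
+ * Sketch of a driver ioctl table built with DRM_IOCTL_DEF; the ioctl
+ * number and handler names (DRM_IOCTL_MYDRV_GETPARAM, mydrv_getparam_ioctl)
+ * are hypothetical, and the 32-bit copy hooks are left NULL.
+ *
+ *   static drm_ioctl_desc_t mydrv_ioctls[] = {
+ *       DRM_IOCTL_DEF(DRM_IOCTL_MYDRV_GETPARAM, mydrv_getparam_ioctl,
+ *           DRM_AUTH | DRM_UNLOCKED, NULL, NULL),
+ *   };
+ */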
+
+typedef struct drm_magic_entry {
+ drm_magic_t magic;
+ struct drm_file *priv;
+ struct drm_magic_entry *next;
+} drm_magic_entry_t;
+
+typedef struct drm_magic_head {
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
+} drm_magic_head_t;
+
+typedef struct drm_buf {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int order; /* log-base-2(total) */
+ int used; /* Amount of buffer in use (for DMA) */
+ unsigned long offset; /* Byte offset (used internally) */
+ void *address; /* Address of buffer */
+ unsigned long bus_address; /* Bus address of buffer */
+ struct drm_buf *next; /* Kernel-only: used for free list */
+ volatile int pending; /* On hardware DMA queue */
+ struct drm_file *file_priv;
+ /* Uniq. identifier of holding process */
+ int context; /* Kernel queue for this buffer */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /* Which list we're on */
+
+ int dev_priv_size; /* Size of buffer private storage */
+ void *dev_private; /* Per-buffer private storage */
+} drm_buf_t;
+
+typedef struct drm_freelist {
+ int initialized; /* Freelist in use */
+ uint32_t count; /* Number of free buffers */
+ drm_buf_t *next; /* End pointer */
+
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
+} drm_freelist_t;
+
+typedef struct drm_buf_entry {
+ int buf_size;
+ int buf_count;
+ drm_buf_t *buflist;
+ int seg_count;
+ int page_order;
+
+ uint32_t *seglist;
+ unsigned long *seglist_bus;
+
+ drm_freelist_t freelist;
+} drm_buf_entry_t;
+
+/* Event queued up for userspace to read */
+struct drm_pending_event {
+ struct drm_event *event;
+ struct list_head link;
+ struct drm_file *file_priv;
+ pid_t pid; /* pid of requester, no guarantee it's valid by the time
+ we deliver the event, for tracing only */
+ void (*destroy)(void *event, size_t size);
+};
+
+
+typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
+
+/**
+ * Kernel side of a mapping
+ */
+typedef struct drm_local_map {
+ unsigned long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+
+ /* OSOL_drm Begin */
+ ddi_acc_handle_t acc_handle; /**< The data access handle */
+ ddi_umem_cookie_t umem_cookie; /**< For SAREA alloc and free */
+ int callback; /**< needs callback ops to handle GTT mmap */
+ int gtt_mmap; /**< gtt mmap has been set up */
+ /* OSOL_drm End */
+} drm_local_map_t;
+
+/**
+ * Mappings list
+ */
+struct drm_map_list {
+ struct list_head head; /**< list head */
+ /* OSOL_drm: struct drm_hash_item hash; */
+ struct drm_local_map *map; /**< mapping */
+ uint64_t user_token;
+ struct drm_master *master;
+ struct drm_mm_node *file_offset_node; /**< fake offset */
+};
+
+struct drm_ctx_list {
+ struct list_head head; /**< list head */
+ drm_context_t handle; /**< context handle */
+ struct drm_file *tag; /**< associated fd private data */
+};
+
+struct drm_history_list {
+ struct list_head head;
+ char info[20]; /**< history info */
+ uint32_t cur_seq; /**< current system seqno */
+ uint32_t last_seq; /**< last seqno */
+ void *ring_ptr; /**< current ring ptr */
+};
+
+struct gem_map_list {
+ struct list_head head; /**< list head */
+ devmap_cookie_t dhp;
+ offset_t mapoffset;
+ size_t maplen;
+};
+
+/*
+ * This structure defines the drm_mm memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+ /* Reference count of this object */
+ struct kref refcount;
+
+ /* Handle count of this object. Each handle also holds a reference */
+ atomic_t handle_count;
+
+ /* Related drm device */
+ struct drm_device *dev;
+
+ /* Mapping info for this object */
+ struct drm_map_list map_list;
+
+ /*
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /*
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /*
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /*
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+
+ struct drm_map_list maplist;
+ ddi_dma_handle_t dma_hdl;
+ ddi_acc_handle_t acc_hdl;
+ caddr_t kaddr;
+ size_t real_size; /* real size of memory */
+ pfn_t *pfnarray;
+ caddr_t gtt_map_kaddr;
+
+ struct gfxp_pmem_cookie mempool_cookie;
+
+ struct list_head seg_list;
+
+ struct list_head track_list; /* for i915 mdb */
+ struct list_head his_list;
+};
+
+typedef struct drm_lock_data {
+ struct drm_hw_lock *hw_lock; /**< Hardware lock */
+ /** Private of lock holder's file (NULL=kernel) */
+ struct drm_file *file_priv;
+ kcondvar_t lock_cv; /* lock queue - SOLARIS Specific */
+ kmutex_t lock_mutex; /* lock - SOLARIS Specific */
+ unsigned long lock_time; /* Time of last lock in clock ticks */
+ uint32_t kernel_waiters;
+ uint32_t user_waiters;
+ int idle_has_lock;
+} drm_lock_data_t;
+
+/*
+ * This structure, in drm_device_t, is always initialized while the device
+ * is open. dev->dma_lock protects the incrementing of dev->buf_use, which
+ * when set marks that no further bufs may be allocated until device teardown
+ * occurs (when the last open of the device has closed). The high/low
+ * watermarks of bufs are only touched by the X Server, and thus not
+ * concurrently accessed, so no locking is needed.
+ */
+
+/**
+ * DMA data.
+ */
+typedef struct drm_device_dma {
+
+ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
+ int buf_count; /**< total number of buffers */
+ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
+ int seg_count;
+ int page_count; /**< number of pages */
+ unsigned long *pagelist; /**< page list */
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02,
+ _DRM_DMA_USE_FB = 0x04,
+ _DRM_DMA_USE_PCI_RO = 0x08
+ } flags;
+
+} drm_device_dma_t;
+
+/**
+ * AGP memory entry. Stored as a doubly linked list.
+ */
+typedef struct drm_agp_mem {
+ unsigned long handle; /**< handle */
+ unsigned long bound; /**< address */
+ int pages;
+ struct list_head head;
+} drm_agp_mem_t;
+
+/**
+ * AGP data.
+ *
+ * \sa drm_agp_init() and drm_device::agp.
+ */
+typedef struct drm_agp_head {
+ agp_info_t agp_info; /**< AGP device information */
+ struct list_head memory;
+ unsigned long mode; /**< AGP mode */
+ int enabled; /**< whether the AGP bus has been enabled */
+ int acquired; /**< whether the AGP device has been acquired */
+ unsigned long base;
+ int mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+ ldi_ident_t agpgart_li;
+ ldi_handle_t agpgart_lh;
+} drm_agp_head_t;
+
+typedef struct drm_dma_handle {
+ ddi_dma_handle_t dma_hdl;
+ ddi_acc_handle_t acc_hdl;
+ ddi_dma_cookie_t cookie;
+ uint_t cookie_num;
+ uintptr_t vaddr; /* virtual addr */
+ uintptr_t paddr; /* physical addr */
+ size_t real_sz; /* real size of memory */
+} drm_dma_handle_t;
+
+typedef struct drm_sg_mem {
+ unsigned long handle;
+ void *virtual;
+ int pages;
+ dma_addr_t *busaddr;
+
+ ddi_umem_cookie_t *umem_cookie;
+ drm_dma_handle_t *dmah_sg;
+ drm_dma_handle_t *dmah_gart; /* Handle to PCI memory */
+} drm_sg_mem_t;
+
+typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
+typedef struct drm_vbl_sig {
+ TAILQ_ENTRY(drm_vbl_sig) link;
+ unsigned int sequence;
+ int signo;
+ int pid;
+} drm_vbl_sig_t;
+
+
+/* used for clone device */
+typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
+typedef struct drm_cminor {
+ TAILQ_ENTRY(drm_cminor) link;
+ drm_file_t *fpriv;
+ int minor;
+} drm_cminor_t;
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+typedef struct ati_pcigart_info {
+ int gart_table_location;
+ int is_pcie;
+ void *addr;
+ dma_addr_t bus_addr;
+ drm_local_map_t mapping;
+} drm_ati_pcigart_info;
+
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID (1 << 0)
+#define DRM_SCANOUTPOS_INVBL (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+
+/* DRM device structure */
+struct drm_device;
+
+struct drm_bus {
+ int bus_type;
+ int (*get_irq)(struct drm_device *dev);
+ const char *(*get_name)(struct drm_device *dev);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+ int (*set_unique)(struct drm_device *dev, struct drm_master *master,
+ struct drm_unique *unique);
+ int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
+ /* hooks that are for PCI */
+ int (*agp_init)(struct drm_device *dev);
+
+};
+
+struct drm_driver {
+ int (*load)(struct drm_device *, unsigned long);
+ int (*firstopen)(struct drm_device *);
+ int (*open)(struct drm_device *, drm_file_t *);
+ void (*preclose)(struct drm_device *, drm_file_t *);
+ void (*postclose)(struct drm_device *, drm_file_t *);
+ void (*lastclose)(struct drm_device *);
+ int (*unload)(struct drm_device *);
+ void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
+ int (*presetup)(struct drm_device *);
+ int (*postsetup)(struct drm_device *);
+ int (*open_helper)(struct drm_device *, drm_file_t *);
+ void (*free_filp_priv)(struct drm_device *, drm_file_t *);
+ void (*release)(struct drm_device *, void *);
+ int (*dma_ioctl)(DRM_IOCTL_ARGS);
+ int (*dma_quiescent)(struct drm_device *);
+ int (*dma_flush_block_and_flush)(struct drm_device *,
+ int, drm_lock_flags_t);
+ int (*dma_flush_unblock)(struct drm_device *, int,
+ drm_lock_flags_t);
+ int (*context_dtor)(struct drm_device *, int);
+ int (*device_is_agp) (struct drm_device *);
+
+ /**
+ * Called by vblank timestamping code.
+ *
+ * Return the current display scanout position from a crtc.
+ *
+ * \param dev DRM device.
+ * \param crtc Id of the crtc to query.
+ * \param *vpos Target location for current vertical scanout position.
+ * \param *hpos Target location for current horizontal scanout position.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+	 * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant
+ * but unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+ int (*get_scanout_position) (struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
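+
+	/*
+	 * Illustrative call site only (a sketch, not part of the interface);
+	 * the vblank timestamping code consumes this hook roughly as:
+	 *
+	 *	int vpos, hpos, flags;
+	 *
+	 *	flags = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+	 *	if ((flags & DRM_SCANOUTPOS_VALID) && (flags & DRM_SCANOUTPOS_INVBL))
+	 *		DRM_DEBUG("crtc %d in vblank, %d lines to go", crtc, -vpos);
+	 */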
+
+ /**
+ * Called by \c drm_get_last_vbltimestamp. Should return a precise
+ * timestamp when the most recent VBLANK interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of VBLANK will start scanning out,
+	 * the time immediately after the end of the VBLANK interval. If the
+ * @crtc is currently inside VBLANK, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+	 * \param dev DRM device handle.
+ * \param crtc crtc for which timestamp should be returned.
+ * \param *max_error Maximum allowable timestamp error in nanoseconds.
+ * Implementation should strive to provide timestamp
+ * with an error of at most *max_error nanoseconds.
+ * Returns true upper bound on error for timestamp.
+ * \param *vblank_time Target location for returned vblank timestamp.
+ * \param flags 0 = Defaults, no special treatment needed.
+ * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+ * irq handler. Some drivers need to apply some workarounds
+ * for gpu-specific vblank irq quirks if flag is set.
+ *
+ * \returns
+ * Zero if timestamping isn't supported in current display mode or a
+ * negative number on failure. A positive status code on success,
+ * which describes how the vblank_time timestamp was computed.
+ */
+ int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
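+
+	/*
+	 * A minimal sketch of a typical implementation (not mandated by this
+	 * header): forward to drm_calc_vbltimestamp_from_scanoutpos(),
+	 * declared below; foo_crtc_from_pipe() is a hypothetical driver helper.
+	 *
+	 *	static int foo_get_vblank_timestamp(struct drm_device *dev,
+	 *	    int crtc, int *max_error, struct timeval *vblank_time,
+	 *	    unsigned flags)
+	 *	{
+	 *		struct drm_crtc *refcrtc = foo_crtc_from_pipe(dev, crtc);
+	 *
+	 *		return (drm_calc_vbltimestamp_from_scanoutpos(dev, crtc,
+	 *		    max_error, vblank_time, flags, refcrtc));
+	 *	}
+	 */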
+
+ void (*irq_preinstall)(struct drm_device *);
+ int (*irq_postinstall)(struct drm_device *);
+ void (*irq_uninstall)(struct drm_device *dev);
+
+ uint_t (*irq_handler)(DRM_IRQ_ARGS);
+ int (*vblank_wait)(struct drm_device *, unsigned int *);
+ int (*vblank_wait2)(struct drm_device *, unsigned int *);
+ /* added for intel minimized vblank */
+ u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
+ int (*enable_vblank)(struct drm_device *dev, int crtc);
+ void (*disable_vblank)(struct drm_device *dev, int crtc);
+
+ /* Master routines */
+ int (*master_create)(struct drm_device *dev, struct drm_master *master);
+ void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+ /**
+ * master_set is called whenever the minor master is set.
+ * master_drop is called whenever the minor master is dropped.
+ */
+
+ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_release);
+
+ void (*entervt)(struct drm_device *dev);
+ void (*leavevt)(struct drm_device *dev);
+ void (*agp_support_detect)(struct drm_device *dev, unsigned long flags);
+
+ /*
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
+ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+ void (*gem_fault) (struct drm_gem_object *obj);
+
+ /* vga arb irq handler */
+ void (*vgaarb_irq)(struct drm_device *dev, bool state);
+
+ /* dumb alloc support */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
+ int major;
+ int minor;
+ int patchlevel;
+ char *name;
+ char *desc;
+ char *date;
+
+ u32 driver_features;
+ drm_ioctl_desc_t *ioctls;
+ int num_ioctls;
+
+ int buf_priv_size;
+
+ unsigned use_agp :1;
+ unsigned require_agp :1;
+ unsigned use_sg :1;
+ unsigned use_dma :1;
+ unsigned use_pci_dma :1;
+ unsigned use_dma_queue :1;
+ unsigned use_irq :1;
+ unsigned use_vbl_irq :1;
+ unsigned use_vbl_irq2 :1;
+ unsigned use_mtrr :1;
+ unsigned use_gem;
+ unsigned use_kms;
+
+ struct drm_pci_id_list *id_table;
+};
+#include "drm_crtc.h"
+struct drm_master {
+
+ struct kref refcount; /* refcount for this master */
+
+ struct list_head head; /**< each minor contains a list of masters */
+ struct drm_minor *minor; /**< link back to minor we are a master for */
+
+ char *unique; /**< Unique identifier: e.g., busid */
+ int unique_len; /**< Length of unique field */
+ int unique_size; /**< amount allocated */
+
+ int blocked; /**< Blocked due to VC switch? */
+
+ /** \name Authentication */
+ /*@{ */
+ drm_magic_head_t magiclist[DRM_HASH_SIZE];
+ /*@} */
+
+ struct drm_lock_data lock; /**< Information on hardware lock */
+
+ void *driver_priv; /**< Private structure for driver to use */
+};
+
+struct drm_minor {
+ int index; /**< Minor device number */
+ int type; /**< Control or render */
+ dev_t device;
+ struct drm_device *dev;
+ struct drm_master *master; /* currently active master for this node */
+ struct list_head master_list;
+ struct drm_mode_group mode_group;
+
+ char name[32];
+ struct idr clone_idr;
+ void *private;
+};
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+ bool specified;
+ bool refresh_specified;
+ bool bpp_specified;
+ int xres, yres;
+ int bpp;
+ int refresh;
+ bool rb;
+ bool interlace;
+ bool cvt;
+ bool margins;
+ enum drm_connector_force force;
+};
+
+struct drm_pending_vblank_event {
+ struct drm_pending_event base;
+ int pipe;
+ struct drm_event_vblank event;
+};
+
+struct drm_file {
+ TAILQ_ENTRY(drm_file) link;
+
+ int authenticated;
+ pid_t pid;
+ uid_t uid;
+ drm_magic_t magic;
+ unsigned long ioctl_count;
+ struct list_head lhead;
+ struct drm_minor *minor;
+ unsigned long lock_count;
+
+ /** Mapping of mm object handles to object pointers. */
+ struct idr_list object_idr;
+ /** Lock for synchronization of access to object_idr. */
+ spinlock_t table_lock;
+
+ void *driver_priv;
+
+ int is_master; /* this file private is a master for a minor */
+ struct drm_master *master; /* master this node is currently associated with
+ N.B. not always minor->master */
+
+ /**
+ * fbs - List of framebuffers associated with this file.
+ *
+ * Protected by fbs_lock. Note that the fbs list holds a reference on
+ * the fb object to prevent it from untimely disappearing.
+ */
+ struct list_head fbs;
+ struct mutex fbs_lock;
+
+ wait_queue_head_t event_wait;
+ struct list_head event_list;
+ int event_space;
+ struct pollhead drm_pollhead;
+};
+
+/*
+ * Hardware-specific code needs to initialize mutexes which
+ * can be used in interrupt context, so it needs to know
+ * the interrupt priority. The interrupt cookie is the
+ * intr_block field of the pci_dev hanging off drm_device.
+ */
+#define DRM_INTR_PRI(dev) \
+ DDI_INTR_PRI((dev)->pdev->intr_block)
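+
+/*
+ * Usage sketch (illustrative only): an interrupt-safe mutex is initialized
+ * with the priority cookie, following the mutex_init(9F) convention:
+ *
+ *	mutex_init(&dev->irq_lock, NULL, MUTEX_DRIVER, DRM_INTR_PRI(dev));
+ */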
+
+struct drm_device {
+ int if_version; /**< Highest interface version set */
+
+ /** \name Locks */
+ /*@{ */
+ spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
+	kmutex_t struct_mutex;		/**< For others */
+ /*@} */
+
+ /** \name Usage Counters */
+ /*@{ */
+ int open_count; /**< Outstanding files open */
+ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /**< Buffer allocation in progress */
+ /*@} */
+
+ /** \name Performance counters */
+ /*@{ */
+ unsigned long counters;
+ enum drm_stat_type types[15];
+ atomic_t counts[15];
+ /*@} */
+
+ struct list_head filelist;
+
+ /** \name Memory management */
+ /*@{ */
+ struct list_head maplist; /**< Linked list of regions */
+ /*@} */
+
+ /** \name Context handle management */
+ /*@{ */
+ struct list_head ctxlist; /**< Linked list of context handles */
+ int ctx_count; /**< Number of context handles */
+ struct mutex ctxlist_mutex; /**< For ctxlist */
+
+ struct idr ctx_idr;
+
+ /*@} */
+
+ /** \name DMA queues (contexts) */
+ /*@{ */
+ struct drm_device_dma *dma; /**< Optional pointer for DMA support */
+ /*@} */
+
+ /** \name Context support */
+ /*@{ */
+ int irq_enabled; /* True if the irq handler is enabled */
+ atomic_t context_flag; /* Context swapping flag */
+ wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
+ int last_context; /* Last current context */
+ unsigned long last_switch; /**< jiffies at last context switch */
+ /*@} */
+
+ struct workqueue_struct *drm_wq;
+ struct work_struct work;
+ /** \name VBLANK IRQ support */
+ /*@{ */
+
+ /*
+ * At load time, disabling the vblank interrupt won't be allowed since
+ * old clients may not call the modeset ioctl and therefore misbehave.
+ * Once the modeset ioctl *has* been called though, we can safely
+ * disable them when unused.
+ */
+ int vblank_disable_allowed;
+
+ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
+ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+ struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+ spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
+ spinlock_t vbl_lock;
+	atomic_t *vblank_refcount;	/* number of users of vblank interrupts per crtc */
+ u32 *last_vblank; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ int *vblank_enabled; /* so we don't call enable more than
+ once per disable */
+ int *vblank_inmodeset; /* Display driver is setting mode */
+ u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */
+ struct timer_list vblank_disable_timer;
+
+ u32 max_vblank_count; /**< size of vblank counter register */
+
+ /**
+ * List of events
+ */
+ struct list_head vblank_event_list;
+ spinlock_t event_lock;
+
+ /*@} */
+
+ struct drm_agp_head *agp; /**< AGP data */
+ ulong_t agp_aperbase;
+
+ struct device *dev; /**< Device structure */
+ struct pci_dev *pdev; /**< PCI device structure */
+ int pci_vendor; /**< PCI vendor id */
+ int pci_device; /**< PCI device id */
+
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
+ void *dev_private; /**< device private data */
+ void *mm_private;
+ struct drm_driver *driver;
+ struct drm_local_map *agp_buffer_map;
+ unsigned int agp_buffer_token;
+ struct drm_minor *control; /**< Control node for card */
+ struct drm_minor *primary; /**< render type primary screen head */
+
+ struct drm_mode_config mode_config; /**< Current mode config */
+ struct work_struct output_poll_work;
+ struct timer_list output_poll_timer;
+
+ /* \name GEM information */
+ /* @{ */
+ kmutex_t object_name_lock;
+ struct idr_list object_name_idr;
+ /* @} */
+ int switch_power_state;
+
+ /* OSOL Begin*/
+ dev_info_t *devinfo;
+ struct drm_minor *vgatext;
+ struct drm_minor *agpmaster;
+
+ struct idr map_idr;
+
+ /* Locks */
+ kmutex_t dma_lock; /* protects dev->dma */
+ kmutex_t irq_lock; /* protects irq condition checks */
+
+ kmutex_t page_fault_lock;
+
+ kstat_t *asoft_ksp; /* kstat support */
+
+ struct list_head gem_objects_list;
+ spinlock_t track_lock;
+
+ uint32_t *old_gtt;
+ size_t old_gtt_size;
+ uint32_t *gtt_dump;
+
+ /*
+ * FMA capabilities
+ */
+ int drm_fm_cap;
+ /* OSOL End */
+};
+
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+
+static __inline__ int drm_core_check_feature(struct drm_device *dev,
+ int feature)
+{
+ return ((dev->driver->driver_features & feature) ? 1 : 0);
+}
+#if __OS_HAS_AGP
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_USE_AGP);
+}
+#else
+#define drm_core_has_AGP(dev) (0)
+#endif
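+
+/*
+ * Illustrative use only; a driver entry point might gate its AGP paths as:
+ *
+ *	if (!drm_core_has_AGP(dev)) {
+ *		DRM_ERROR("AGP support required but not available");
+ *		return (-EINVAL);
+ *	}
+ */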
+
+extern struct idr drm_minors_idr;
+extern struct cb_ops drm_cb_ops;
+
+void *drm_alloc(size_t, int);
+void *drm_calloc(size_t, size_t, int);
+void *drm_realloc(void *, size_t, size_t, int);
+void drm_free(void *, size_t, int);
+int drm_ioremap(drm_device_t *, drm_local_map_t *);
+void drm_ioremapfree(drm_local_map_t *);
+void *drm_sun_ioremap(uint64_t paddr, size_t size, uint32_t mode);
+void drm_sun_iounmap(void *addr);
+void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
+void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);
+
+void drm_pci_free(drm_dma_handle_t *);
+void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);
+
+int drm_ctxbitmap_init(drm_device_t *);
+void drm_ctxbitmap_cleanup(drm_device_t *);
+void drm_ctxbitmap_free(drm_device_t *, int);
+
+/* Locking IOCTL support (drm_lock.c) */
+int drm_lock_take(struct drm_lock_data *, unsigned int);
+int drm_lock_free(struct drm_lock_data *, unsigned int);
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
+
+/* Buffer management support (drm_bufs.c) */
+extern int drm_map_handle(struct drm_device *dev, struct drm_map_list *list);
+unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
+unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
+int drm_initmap(drm_device_t *, unsigned long, unsigned long,
+ unsigned int, int, int);
+extern int drm_rmmap(struct drm_device *, struct drm_local_map *);
+extern int drm_addmap(struct drm_device *, unsigned long, unsigned long,
+ enum drm_map_type, enum drm_map_flags, struct drm_local_map **);
+int drm_order(unsigned long);
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
+
+/* DMA support (drm_dma.c) */
+int drm_dma_setup(drm_device_t *);
+void drm_dma_takedown(drm_device_t *);
+void drm_free_buffer(drm_device_t *, drm_buf_t *);
+void drm_core_reclaim_buffers(drm_device_t *, drm_file_t *);
+#define drm_reclaim_buffers drm_core_reclaim_buffers
+
+/* IRQ support (drm_irq.c) */
+int drm_irq_install(drm_device_t *);
+int drm_irq_uninstall(drm_device_t *);
+uint_t drm_irq_handler(DRM_IRQ_ARGS);
+void drm_driver_irq_preinstall(drm_device_t *);
+void drm_driver_irq_postinstall(drm_device_t *);
+void drm_driver_irq_uninstall(drm_device_t *);
+int drm_vblank_wait(drm_device_t *, unsigned int *);
+u32 drm_vblank_count(struct drm_device *dev, int crtc);
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e);
+bool drm_handle_vblank(struct drm_device *dev, int crtc);
+int drm_vblank_get(struct drm_device *dev, int crtc);
+void drm_vblank_put(struct drm_device *dev, int crtc);
+void drm_vblank_off(struct drm_device *dev, int crtc);
+int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+void drm_vblank_cleanup(struct drm_device *dev);
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags);
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ int crtc, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc);
+void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+
+extern struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd);
+
+/* Modesetting support */
+int drm_modeset_ctl(DRM_IOCTL_ARGS);
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+int pci_enable_msi(struct pci_dev *pdev);
+void pci_disable_msi(struct pci_dev *pdev);
+/* AGP/GART support (drm_agpsupport.h) */
+extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern int drm_agp_acquire(struct drm_device *dev);
+extern int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_release(struct drm_device *dev);
+extern int drm_agp_release_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+extern int drm_agp_enable_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+extern int drm_agp_info_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_free_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS);
+extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_bind_ioctl(DRM_IOCTL_ARGS);
+extern void *drm_agp_allocate_memory(size_t pages, uint32_t type, struct drm_device *dev);
+extern int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
+extern int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *dev);
+extern int drm_agp_unbind_memory(unsigned long, drm_device_t *dev);
+extern void drm_agp_chipset_flush(struct drm_device *dev);
+
+extern void drm_agp_cleanup(drm_device_t *);
+extern int drm_agp_bind_pages(struct drm_device *dev, pfn_t *pages,
+ unsigned long num_pages, uint32_t gtt_offset, unsigned int agp_type);
+extern int drm_agp_unbind_pages(struct drm_device *dev, pfn_t *pages,
+ unsigned long num_pages, uint32_t gtt_offset, pfn_t scratch, uint32_t type);
+extern int drm_agp_rw_gtt(struct drm_device *dev, unsigned long num_pages,
+ uint32_t gtt_offset, void *gttp, uint32_t type);
+
+/* kstat support (drm_kstats.c) */
+int drm_init_kstats(drm_device_t *);
+void drm_fini_kstats(drm_device_t *);
+
+/* Scatter Gather Support (drm_scatter.c) */
+extern void drm_sg_cleanup(drm_sg_mem_t *);
+
+/* ATI PCIGART support (ati_pcigart.c) */
+int drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
+int drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);
+
+ /* Locking IOCTL support (drm_lock.h) */
+extern int drm_lock(DRM_IOCTL_ARGS);
+extern int drm_unlock(DRM_IOCTL_ARGS);
+extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
+extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+extern void drm_idlelock_release(struct drm_lock_data *lock_data);
+
+int drm_setversion(DRM_IOCTL_ARGS);
+struct drm_local_map *drm_getsarea(struct drm_device *dev);
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
+
+/* Misc. IOCTL support (drm_ioctl.c) */
+int drm_irq_by_busid(DRM_IOCTL_ARGS);
+int drm_getunique(DRM_IOCTL_ARGS);
+int drm_setunique(DRM_IOCTL_ARGS);
+int drm_getmap(DRM_IOCTL_ARGS);
+int drm_getclient(DRM_IOCTL_ARGS);
+int drm_getstats(DRM_IOCTL_ARGS);
+int drm_getcap(DRM_IOCTL_ARGS);
+int drm_noop(DRM_IOCTL_ARGS);
+
+/* Context IOCTL support (drm_context.c) */
+int drm_resctx(DRM_IOCTL_ARGS);
+int drm_addctx(DRM_IOCTL_ARGS);
+int drm_modctx(DRM_IOCTL_ARGS);
+int drm_getctx(DRM_IOCTL_ARGS);
+int drm_switchctx(DRM_IOCTL_ARGS);
+int drm_newctx(DRM_IOCTL_ARGS);
+int drm_rmctx(DRM_IOCTL_ARGS);
+int drm_setsareactx(DRM_IOCTL_ARGS);
+int drm_getsareactx(DRM_IOCTL_ARGS);
+
+/* Drawable IOCTL support (drm_drawable.c) */
+int drm_adddraw(DRM_IOCTL_ARGS);
+int drm_rmdraw(DRM_IOCTL_ARGS);
+int drm_update_drawable_info(DRM_IOCTL_ARGS);
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
+ drm_drawable_t id);
+void drm_drawable_free_all(struct drm_device *dev);
+
+
+ /* Authentication IOCTL support (drm_auth.h) */
+int drm_getmagic(DRM_IOCTL_ARGS);
+int drm_authmagic(DRM_IOCTL_ARGS);
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
+
+/* Buffer management support (drm_bufs.c) */
+int drm_addmap_ioctl(DRM_IOCTL_ARGS);
+int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
+int drm_addbufs(DRM_IOCTL_ARGS);
+int drm_infobufs(DRM_IOCTL_ARGS);
+int drm_markbufs(DRM_IOCTL_ARGS);
+int drm_freebufs(DRM_IOCTL_ARGS);
+int drm_mapbufs(DRM_IOCTL_ARGS);
+
+/* IRQ support (drm_irq.c) */
+int drm_control(DRM_IOCTL_ARGS);
+int drm_wait_vblank(DRM_IOCTL_ARGS);
+
+/* Scatter Gather Support (drm_scatter.c) */
+int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS);
+int drm_sg_free(DRM_IOCTL_ARGS);
+
+extern int drm_debug_flag;
+extern int mdb_track_enable;
+
+extern struct list_head drm_iomem_list;
+
+#define MDB_TRACK_ENABLE mdb_track_enable
+
+#define TRACE_GEM_OBJ_HISTORY(obj, str) \
+do { \
+ if (MDB_TRACK_ENABLE) { \
+ int seq_t = 0; \
+ if (obj->ring) \
+ seq_t = obj->ring->get_seqno(obj->ring, true); \
+ drm_gem_object_track(&obj->base, str, \
+ seq_t, 0, \
+ (void *)obj->ring); \
+ } \
+} while (*"\0")
+
+/* Functions backing the DRM_DEBUG, DRM_ERROR and DRM_INFO macros below */
+/* PRINTFLIKE4 */
+extern void drm_debug_print(int cmn_err, const char* func, int line, const char *fmt, ...);
+/* PRINTFLIKE1 */
+extern void drm_debug(const char *fmt, ...);
+/* PRINTFLIKE1 */
+extern void drm_error(const char *fmt, ...);
+/* PRINTFLIKE1 */
+extern void drm_info(const char *fmt, ...);
+
+#ifdef DEBUG
+#define DRM_DEBUG(...) \
+ do { \
+ if (drm_debug_flag & 0x08) \
+ drm_debug_print( \
+ CE_NOTE, __func__, __LINE__, ##__VA_ARGS__); \
+ } while (__lintzero)
+#define DRM_DEBUG_KMS(...) \
+ do { \
+ if (drm_debug_flag & 0x04) \
+ drm_debug_print( \
+ CE_NOTE, __func__, __LINE__, ##__VA_ARGS__); \
+ } while (__lintzero)
+#define DRM_DEBUG_DRIVER(...) \
+ do { \
+ if (drm_debug_flag & 0x02) \
+ drm_debug_print( \
+ CE_NOTE, __func__, __LINE__, ##__VA_ARGS__); \
+ } while (__lintzero)
+#define DRM_INFO(...) \
+ do { \
+ if (drm_debug_flag & 0x01) \
+ drm_debug_print( \
+ CE_NOTE, __func__, __LINE__, ##__VA_ARGS__); \
+ } while (__lintzero)
+#else
+#define DRM_DEBUG(...) do { } while (__lintzero)
+#define DRM_DEBUG_KMS(...) do { } while (__lintzero)
+#define DRM_DEBUG_DRIVER(...) do { } while (__lintzero)
+#define DRM_INFO(...) do { } while (__lintzero)
+#endif
+
+#define DRM_ERROR(...) \
+ drm_debug_print(CE_WARN, __func__, __LINE__, ##__VA_ARGS__)
+
+#define DRM_LOG_KMS DRM_INFO
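+
+/*
+ * Usage sketch (illustrative only). The macros take printf-style arguments;
+ * all but DRM_ERROR compile away unless DEBUG is defined and the matching
+ * bit is set in drm_debug_flag:
+ *
+ *	DRM_DEBUG("vblank %d enabled, count %u", crtc, count);
+ *	DRM_ERROR("timed out waiting for hardware to idle");
+ */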
+
+extern int drm_lastclose(struct drm_device *dev);
+
+extern int drm_open(struct drm_minor *, int, int, cred_t *);
+extern int drm_release(struct drm_file *);
+extern ssize_t drm_read(struct drm_file *, struct uio *);
+extern short drm_poll(struct drm_file *, short);
+extern int drm_init(struct drm_device *, struct drm_driver *);
+extern void drm_exit(struct drm_device *);
+extern void drm_fm_init(struct drm_device *dev);
+extern void drm_fm_fini(struct drm_device *dev);
+extern void drm_fm_ereport(struct drm_device *dev, char *detail);
+extern int drm_check_dma_handle(struct drm_device *dev, ddi_dma_handle_t handle);
+extern int drm_check_acc_handle(struct drm_device *dev, ddi_acc_handle_t handle);
+extern int drm_ioctl(dev_t dev_id, struct drm_file *file_priv,
+ int cmd, intptr_t arg, int mode, cred_t *credp);
+
+extern int drm_put_minor(struct drm_minor **minor_p);
+
+extern int drm_setmaster_ioctl(DRM_IOCTL_ARGS);
+extern int drm_dropmaster_ioctl(DRM_IOCTL_ARGS);
+extern struct drm_master *drm_master_create(struct drm_minor *minor);
+extern struct drm_master *drm_master_get(struct drm_master *master);
+extern void drm_master_put(struct drm_master **master);
+extern int drm_get_dev(struct drm_device *dev, struct pci_dev *pdev,
+ struct drm_driver *driver, unsigned long driver_data);
+extern void drm_put_dev(struct drm_device *dev);
+
+void drm_master_release(struct drm_device *dev, struct drm_file *fpriv);
+
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+
+/* sysfs support (drm_sysfs.c) */
+extern int drm_sysfs_device_add(struct drm_minor *minor);
+extern void drm_sysfs_device_remove(struct drm_minor *minor);
+
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
+void drm_gem_object_release(struct drm_gem_object *obj);
+void drm_gem_object_free(struct kref *kref);
+void drm_gem_object_free_unlocked(struct kref *kref);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+ size_t size);
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size, int gen);
+int drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_reference(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_unreference(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
+
+extern void
+drm_gem_object_handle_reference(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj);
+
+extern void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj);
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+ struct drm_file *filp,
+ u32 handle);
+int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
+int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
+int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+void drm_gem_mmap(struct drm_gem_object *obj, pfn_t pfn);
+void drm_gem_release_mmap(struct drm_gem_object *obj);
+void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
+
+extern struct drm_local_map *drm_core_findmap(struct drm_device *dev,
+ unsigned int token);
+
+#ifdef NEVER
+static __inline__ int drm_device_is_pcie(struct drm_device *dev)
+{
+ return 0;
+}
+#endif
+
+void drm_gem_object_track(struct drm_gem_object *obj, const char *name,
+ uint32_t cur_seq, uint32_t last_seq, void* ptr);
+
+#endif /* _DRMP_H */
diff --git a/usr/src/uts/common/io/drm/drm_atomic.h b/usr/src/uts/common/drm/drm_atomic.h
index 0adc70c..7c6b7c6 100644
--- a/usr/src/uts/common/io/drm/drm_atomic.h
+++ b/usr/src/uts/common/drm/drm_atomic.h
@@ -1,7 +1,9 @@
+/* BEGIN CSTYLED */
+
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
+
/*
* \file drm_atomic.h
* Atomic operations used in the DRM which may or may not be provided by the OS.
@@ -11,7 +13,7 @@
/*
* Copyright 2004 Eric Anholt
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2012, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -55,14 +57,22 @@ typedef uint32_t atomic_t;
#define atomic_set(p, v) (*(p) = (v))
#define atomic_read(p) (*(p))
-#define atomic_inc(p) atomic_inc_uint(p)
+#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_dec_uint(p)
+#define atomic_dec_and_test(p) \
+ ((0 == atomic_dec_32_nv(p)) ? 1 : 0)
#define atomic_add(n, p) atomic_add_int(p, n)
+#define atomic_add_return(n, p) (atomic_add_int(p, n), *p)
#define atomic_sub(n, p) atomic_add_int(p, -n)
#define atomic_set_int(p, bits) atomic_or_uint(p, bits)
#define atomic_clear_int(p, bits) atomic_and_uint(p, ~(bits))
#define atomic_cmpset_int(p, c, n) \
((c == atomic_cas_uint(p, c, n)) ? 1 : 0)
+#define atomic_clear_mask(mask, p) (*(p) &= ~mask)
+#define atomic_set_mask(mask, p) (*(p) |= mask)
+#define atomic_inc_not_zero(p) \
+ if (atomic_read(p) != 0) \
+ atomic_inc(p);
#define set_bit(b, p) \
atomic_set_int(((volatile uint_t *)(void *)p) + (b >> 5), \
diff --git a/usr/src/uts/common/drm/drm_core.h b/usr/src/uts/common/drm/drm_core.h
new file mode 100644
index 0000000..1ef6fc7
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_core.h
@@ -0,0 +1,49 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_CORE_H__
+#define __DRM_CORE_H__
+
+#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
+
+#define CORE_NAME "drm"
+#define CORE_DESC "DRM shared core routines"
+#define CORE_DATE "20060810"
+
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 4
+
+#define CORE_MAJOR 1
+#define CORE_MINOR 1
+#define CORE_PATCHLEVEL 0
+
+#endif /* __DRM_CORE_H__ */
diff --git a/usr/src/uts/common/drm/drm_crtc.h b/usr/src/uts/common/drm/drm_crtc.h
new file mode 100644
index 0000000..8d254d7
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_crtc.h
@@ -0,0 +1,1068 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_CRTC_H__
+#define __DRM_CRTC_H__
+
+#include <sys/ksynch.h>
+#include "drm.h"
+#include "drmP.h"
+#include "drm_sun_idr.h"
+#include "drm_sun_i2c.h"
+#include "drm_fourcc.h"
+
+struct drm_device;
+struct drm_mode_set;
+struct drm_framebuffer;
+struct drm_object_properties;
+
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+
+struct drm_mode_object {
+ uint32_t id;
+ uint32_t type;
+ struct drm_object_properties *properties;
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 24
+struct drm_object_properties {
+ int count;
+ uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+ uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
+/*
+ * Note on terminology: here, for brevity and convenience, we refer to connector
+ * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
+ * DVI, etc. And 'screen' refers to the whole of the visible display, which
+ * may span multiple monitors (and therefore multiple CRTC and connector
+ * structures).
+ */
+
+enum drm_mode_status {
+ MODE_OK = 0, /* Mode OK */
+ MODE_HSYNC, /* hsync out of range */
+ MODE_VSYNC, /* vsync out of range */
+ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
+	MODE_V_ILLEGAL,	/* mode has illegal vertical timings */
+ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
+	MODE_NOMODE,	/* no mode with a matching name */
+ MODE_NO_INTERLACE, /* interlaced mode not supported */
+ MODE_NO_DBLESCAN, /* doublescan mode not supported */
+ MODE_NO_VSCAN, /* multiscan mode not supported */
+ MODE_MEM, /* insufficient video memory */
+ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
+ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
+ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
+ MODE_NOCLOCK, /* no fixed clock available */
+ MODE_CLOCK_HIGH, /* clock required is too high */
+ MODE_CLOCK_LOW, /* clock required is too low */
+ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
+ MODE_BAD_HVALUE, /* horizontal timing was out of range */
+ MODE_BAD_VVALUE, /* vertical timing was out of range */
+ MODE_BAD_VSCAN, /* VScan value out of range */
+ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
+ MODE_HSYNC_WIDE, /* horizontal sync too wide */
+ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
+ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
+ MODE_VSYNC_NARROW, /* vertical sync too narrow */
+ MODE_VSYNC_WIDE, /* vertical sync too wide */
+ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
+ MODE_VBLANK_WIDE, /* vertical blanking too wide */
+ MODE_PANEL, /* exceeds panel dimensions */
+ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
+ MODE_ONE_WIDTH, /* only one width is supported */
+ MODE_ONE_HEIGHT, /* only one height is supported */
+ MODE_ONE_SIZE, /* only one resolution is supported */
+ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
+	MODE_UNVERIFIED = -3,	/* mode needs to be re-verified */
+ MODE_BAD = -2, /* unspecified reason */
+ MODE_ERROR = -1 /* error condition */
+};
+
+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
+ DRM_MODE_TYPE_CRTC_C)
+
+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+ .name = nm, .status = 0, .type = (t), .clock = (c), \
+ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
+ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
+ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
+ .vscan = (vs), .flags = (f), \
+ .base.type = DRM_MODE_OBJECT_MODE
+
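+/*
+ * Illustrative only: DRM_MODE() is normally used to build static mode tables
+ * with designated initializers, e.g. the standard 640x480@60Hz timing:
+ *
+ *	static const struct drm_display_mode vga_mode = {
+ *		DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175,
+ *			 640, 656, 752, 800, 0, 480, 490, 492, 525, 0,
+ *			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+ *	};
+ */
+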
+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+
+struct drm_display_mode {
+ /* Header */
+ struct list_head head;
+ struct drm_mode_object base;
+
+ char name[DRM_DISPLAY_MODE_LEN];
+
+ enum drm_mode_status status;
+ unsigned int type;
+
+ /* Proposed mode values */
+ int clock; /* in kHz */
+ int hdisplay;
+ int hsync_start;
+ int hsync_end;
+ int htotal;
+ int hskew;
+ int vdisplay;
+ int vsync_start;
+ int vsync_end;
+ int vtotal;
+ int vscan;
+ unsigned int flags;
+
+ /* Addressable image size (may be 0 for projectors, etc.) */
+ int width_mm;
+ int height_mm;
+
+ /* Actual mode we give to hw */
+ int clock_index;
+ int synth_clock;
+ int crtc_hdisplay;
+ int crtc_hblank_start;
+ int crtc_hblank_end;
+ int crtc_hsync_start;
+ int crtc_hsync_end;
+ int crtc_htotal;
+ int crtc_hskew;
+ int crtc_vdisplay;
+ int crtc_vblank_start;
+ int crtc_vblank_end;
+ int crtc_vsync_start;
+ int crtc_vsync_end;
+ int crtc_vtotal;
+
+ /* Driver private mode info */
+ int private_size;
+ int *private;
+ int private_flags;
+
+ int vrefresh; /* in Hz */
+ int hsync; /* in kHz */
+};
+
+enum drm_connector_status {
+ connector_status_connected = 1,
+ connector_status_disconnected = 2,
+ connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+ SubPixelUnknown = 0,
+ SubPixelHorizontalRGB,
+ SubPixelHorizontalBGR,
+ SubPixelVerticalRGB,
+ SubPixelVerticalBGR,
+ SubPixelNone,
+};
+
+#define DRM_COLOR_FORMAT_RGB444 (1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
+/*
+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
+ */
+struct drm_display_info {
+ char name[DRM_DISPLAY_INFO_LEN];
+
+ /* Physical size */
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ /* Clock limits FIXME: storage format */
+ unsigned int min_vfreq, max_vfreq;
+ unsigned int min_hfreq, max_hfreq;
+ unsigned int pixel_clock;
+ unsigned int bpc;
+
+ enum subpixel_order subpixel_order;
+ u32 color_formats;
+
+ u8 cea_rev;
+};
+
+struct drm_framebuffer_funcs {
+ /* note: use drm_framebuffer_remove() */
+ void (*destroy)(struct drm_framebuffer *framebuffer);
+ int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+ /**
+	 * Optional callback for the dirty fb ioctl.
+	 *
+	 * Userspace can notify the driver via this callback
+	 * that an area of the framebuffer has changed and should
+ * be flushed to the display hardware.
+ *
+ * See documentation in drm_mode.h for the struct
+ * drm_mode_fb_dirty_cmd for more information as all
+ * the semantics and arguments have a one to one mapping
+ * on this function.
+ */
+ int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
+};
+
+struct drm_framebuffer {
+ struct drm_device *dev;
+ /*
+ * Note that the fb is refcounted for the benefit of driver internals,
+	 * for example, on some hw disabling a CRTC/plane is asynchronous, and
+ * scanout does not actually complete until the next vblank. So some
+ * cleanup (like releasing the reference(s) on the backing GEM bo(s))
+ * should be deferred. In cases like this, the driver would like to
+ * hold a ref to the fb even though it has already been removed from
+ * userspace perspective.
+ */
+ struct kref refcount;
+ /*
+ * Place on the dev->mode_config.fb_list, access protected by
+ * dev->mode_config.fb_lock.
+ */
+ struct list_head head;
+ struct drm_mode_object base;
+ const struct drm_framebuffer_funcs *funcs;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
+ unsigned int width;
+ unsigned int height;
+ /* depth can be 15 or 16 */
+ unsigned int depth;
+ int bits_per_pixel;
+ int flags;
+ uint32_t pixel_format; /* fourcc format */
+ struct list_head filp_head;
+ /* if you are using the helper */
+ void *helper_private;
+};
+
+struct drm_property_blob {
+ struct drm_mode_object base;
+ struct list_head head;
+ unsigned int length;
+ unsigned char data[];
+};
+
+struct drm_property_enum {
+ uint64_t value;
+ struct list_head head;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_property {
+ struct list_head head;
+ struct drm_mode_object base;
+ uint32_t flags;
+ char name[DRM_PROP_NAME_LEN];
+ uint32_t num_values;
+ uint64_t *values;
+
+ struct list_head enum_blob_list;
+};
+
+struct drm_crtc;
+struct drm_connector;
+struct drm_encoder;
+struct drm_pending_vblank_event;
+struct drm_plane;
+
+/**
+ * drm_crtc_funcs - control CRTCs for a given device
+ * @save: save CRTC state
+ * @restore: restore CRTC state
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
+ * @cursor_set: setup the cursor
+ * @cursor_move: move the cursor
+ * @gamma_set: specify color ramp for CRTC
+ * @destroy: deinit and free object
+ * @set_property: called when a property is changed
+ * @set_config: apply a new CRTC configuration
+ * @page_flip: initiate a page flip
+ *
+ * The drm_crtc_funcs structure is the central CRTC management structure
+ * in the DRM. Each CRTC controls one or more connectors (note that the name
+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
+ * connectors, not just CRTs).
+ *
+ * Each driver is responsible for filling out this structure at startup time,
+ * in addition to providing other modesetting features, like i2c and DDC
+ * bus accessors.
+ */
+struct drm_crtc_funcs {
+ /* Save CRTC state */
+ void (*save)(struct drm_crtc *crtc); /* suspend? */
+ /* Restore CRTC state */
+ void (*restore)(struct drm_crtc *crtc); /* resume? */
+ /* Reset CRTC state */
+ void (*reset)(struct drm_crtc *crtc);
+
+ /* cursor controls */
+ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+ int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height,
+ int32_t hot_x, int32_t hot_y);
+ int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
+
+ /* Set gamma on the CRTC */
+ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);
+ /* Object destroy routine */
+ void (*destroy)(struct drm_crtc *crtc);
+
+ int (*set_config)(struct drm_mode_set *set);
+
+ /*
+ * Flip to the given framebuffer. This implements the page
+	 * flip ioctl described in drm_mode.h, specifically, the
+ * implementation must return immediately and block all
+ * rendering to the current fb until the flip has completed.
+ * If userspace set the event flag in the ioctl, the event
+ * argument will point to an event to send back when the flip
+ * completes, otherwise it will be NULL.
+ */
+ int (*page_flip)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+
+ int (*set_property)(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
+};
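+
+/*
+ * A sketch (hypothetical foo_* driver names) of how a driver typically fills
+ * out this table at init time; set_config is often delegated to the helper
+ * library (see drm_crtc_helper.h):
+ *
+ *	static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *		.cursor_set = foo_crtc_cursor_set,
+ *		.cursor_move = foo_crtc_cursor_move,
+ *		.gamma_set = foo_crtc_gamma_set,
+ *		.set_config = drm_crtc_helper_set_config,
+ *		.page_flip = foo_crtc_page_flip,
+ *		.destroy = foo_crtc_destroy,
+ *	};
+ */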
+
+/**
+ * drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object for ID tracking etc.
+ * @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
+ * @invert_dimensions: for purposes of error checking crtc vs fb sizes,
+ * invert the width/height of the crtc. This is used if the driver
+ * is performing 90 or 270 degree rotated scanout
+ * @x: x position on screen
+ * @y: y position on screen
+ * @funcs: CRTC control functions
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
+ *
+ * Each CRTC may have one or more connectors associated with it. This structure
+ * allows the CRTC to be controlled.
+ */
+struct drm_crtc {
+ struct drm_device *dev;
+ struct list_head head;
+
+ /**
+ * crtc mutex
+ *
+ * This provides a read lock for the overall crtc state (mode, dpms
+	 * state, ...) and a write lock for everything which can be updated
+ * without a full modeset (fb, cursor data, ...)
+ */
+ struct mutex mutex;
+
+ struct drm_mode_object base;
+
+	/* framebuffer this CRTC is currently bound to */
+ struct drm_framebuffer *fb;
+
+ /* Temporary tracking of the old fb while a modeset is ongoing. Used
+ * by drm_mode_set_config_internal to implement correct refcounting. */
+ struct drm_framebuffer *old_fb;
+
+ bool enabled;
+
+ /* Requested mode from modesetting. */
+ struct drm_display_mode mode;
+
+ /* Programmed mode in hw, after adjustments for encoders,
+ * crtc, panel scaling etc. Needed for timestamping etc.
+ */
+ struct drm_display_mode hwmode;
+
+ bool invert_dimensions;
+
+ int x, y;
+ const struct drm_crtc_funcs *funcs;
+
+ /* CRTC gamma size for reporting to userspace */
+ uint32_t gamma_size;
+ uint16_t *gamma_store;
+
+ /* Constants needed for precise vblank and swap timestamping. */
+ s64 framedur_ns, linedur_ns, pixeldur_ns;
+
+ /* if you are using the helper */
+ void *helper_private;
+
+ struct drm_object_properties properties;
+};
+
+
+/**
+ * drm_connector_funcs - control connectors on a given device
+ * @dpms: set power state (see drm_crtc_funcs above)
+ * @save: save connector state
+ * @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
+ * @detect: is this connector active?
+ * @get_modes: get mode list for this connector
+ * @set_property: property for this connector may need update
+ * @destroy: make object go away
+ * @force: notify the driver the connector is forced on
+ *
+ * Each CRTC may have one or more connectors attached to it. The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+ void (*dpms)(struct drm_connector *connector, int mode);
+ void (*save)(struct drm_connector *connector);
+ void (*restore)(struct drm_connector *connector);
+ void (*reset)(struct drm_connector *connector);
+
+ /* Check to see if anything is attached to the connector.
+ * @force is set to false whilst polling, true when checking the
+ * connector due to user request. @force can be used by the driver
+ * to avoid expensive, destructive operations during automated
+ * probing.
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
+ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+ int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val);
+ void (*destroy)(struct drm_connector *connector);
+ void (*force)(struct drm_connector *connector);
+};
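+
+/*
+ * Illustrative detect() shape only (foo_* names are hypothetical); expensive
+ * or destructive probing is skipped unless userspace forced the check:
+ *
+ *	static enum drm_connector_status
+ *	foo_connector_detect(struct drm_connector *connector, bool force)
+ *	{
+ *		if (foo_output_sensed(connector))
+ *			return (connector_status_connected);
+ *		if (!force)
+ *			return (connector_status_unknown);
+ *		return (foo_load_detect(connector));
+ *	}
+ */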
+
+/**
+ * drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
+struct drm_encoder_funcs {
+ void (*reset)(struct drm_encoder *encoder);
+ void (*destroy)(struct drm_encoder *encoder);
+};
+
+#define DRM_CONNECTOR_MAX_UMODES 16
+#define DRM_CONNECTOR_LEN 32
+#define DRM_CONNECTOR_MAX_ENCODER 3
+
+/**
+ * drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+ * @crtc: currently bound CRTC
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
+ */
+struct drm_encoder {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+ int encoder_type;
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+
+ struct drm_crtc *crtc;
+ const struct drm_encoder_funcs *funcs;
+ void *helper_private;
+};
+
+enum drm_connector_force {
+ DRM_FORCE_UNSPECIFIED,
+ DRM_FORCE_OFF,
+ DRM_FORCE_ON, /* force on analog part normally */
+ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+#define MAX_ELD_BYTES 128
+
+/**
+ * drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @display_info: information about attached display (e.g. from EDID)
+ * @funcs: connector control functions
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @property_ids: property tracking for this connector
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC. Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+ struct drm_device *dev;
+ //struct device kdev;
+ struct device_attribute *attr;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ int connector_type;
+ int connector_type_id;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ struct list_head modes; /* list of modes on this connector */
+
+ enum drm_connector_status status;
+
+ /* these are modes added by probing with DDC or the BIOS */
+ struct list_head probed_modes;
+
+ struct drm_display_info display_info;
+ const struct drm_connector_funcs *funcs;
+
+ struct drm_property_blob *edid_blob_ptr;
+ struct drm_object_properties properties;
+
+ uint8_t polled; /* DRM_CONNECTOR_POLL_* */
+
+ /* requested DPMS state */
+ int dpms;
+
+ void *helper_private;
+
+ /* forced on connector */
+ enum drm_connector_force force;
+ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ struct drm_encoder *encoder; /* currently active encoder */
+
+ /* EDID bits */
+ uint8_t eld[MAX_ELD_BYTES];
+ bool dvi_dual;
+ int max_tmds_clock; /* in MHz */
+ bool latency_present[2];
+ int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ int audio_latency[2];
+ int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+ unsigned bad_edid_counter;
+};
+
+/**
+ * drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
+ * @set_property: called when a property is changed
+ */
+struct drm_plane_funcs {
+ int (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+ int (*disable_plane)(struct drm_plane *plane);
+ void (*destroy)(struct drm_plane *plane);
+
+ int (*set_property)(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val);
+};
+
+/**
+ * drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @funcs: helper functions
+ * @properties: property tracking for this plane
+ */
+struct drm_plane {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ uint32_t possible_crtcs;
+ uint32_t *format_types;
+ uint32_t format_count;
+
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ const struct drm_plane_funcs *funcs;
+
+ struct drm_object_properties properties;
+};
+
+/**
+ * drm_mode_set - new values for a CRTC config change
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
+ *
+ * Represents a single CRTC, the connectors that it drives, the mode to use,
+ * and the framebuffer it scans out from.
+ *
+ * This is used to set modes.
+ */
+struct drm_mode_set {
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ struct drm_display_mode *mode;
+
+ uint32_t x;
+ uint32_t y;
+
+ struct drm_connector **connectors;
+ size_t num_connectors;
+};
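+
+/*
+ * A minimal single-connector modeset request built with this structure
+ * (illustrative; the locals are hypothetical and error handling is omitted):
+ *
+ *	struct drm_connector *connectors[1] = { connector };
+ *	struct drm_mode_set set;
+ *
+ *	set.fb = fb;
+ *	set.crtc = crtc;
+ *	set.mode = mode;
+ *	set.x = set.y = 0;
+ *	set.connectors = connectors;
+ *	set.num_connectors = 1;
+ *	(void) crtc->funcs->set_config(&set);
+ */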
+
+/**
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
+ */
+struct drm_mode_config_funcs {
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+ void (*output_poll_changed)(struct drm_device *dev);
+};
+
+/**
+ * drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state. But in the
+ * future it could allow groups of objects to be set aside into independent
+ * control groups for use by different user level processes (e.g. two X servers
+ * running simultaneously on different heads, each with their own mode
+ * configuration and freedom of mode setting).
+ */
+struct drm_mode_group {
+ uint32_t num_crtcs;
+ uint32_t num_encoders;
+ uint32_t num_connectors;
+
+ /* list of object IDs for this group */
+ uint32_t *id_list;
+};
+
+/**
+ * drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @idr_mutex: mutex for KMS ID allocation and management
+ * @crtc_idr: main KMS ID tracking object
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+ * @connector_list: list of connector objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling status for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @*_property: core property tracking
+ *
+ * Core mode resource tracking structure. All CRTCs, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
+ */
+struct drm_mode_config {
+ kmutex_t mutex; /* protects configuration (mode lists etc.) */
+ kmutex_t idr_mutex; /* for IDR management */
+ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+ /**
+ * fb_lock - mutex to protect fb state
+ *
+	 * Besides the global fb list, this also protects the fbs list in the
+	 * file_priv.
+ */
+ struct mutex fb_lock;
+ /* this is limited to one for now */
+ int num_fb;
+ struct list_head fb_list;
+ int num_connector;
+ struct list_head connector_list;
+ int num_encoder;
+ struct list_head encoder_list;
+ int num_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+ struct list_head crtc_list;
+
+ struct list_head property_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ const struct drm_mode_config_funcs *funcs;
+ resource_size_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ bool poll_running;
+
+ /* pointers to standard properties */
+ struct list_head property_blob_list;
+ struct drm_property *edid_property;
+ struct drm_property *dpms_property;
+
+ /* DVI-I properties */
+ struct drm_property *dvi_i_subconnector_property;
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /* TV properties */
+ struct drm_property *tv_subconnector_property;
+ struct drm_property *tv_select_subconnector_property;
+ struct drm_property *tv_mode_property;
+ struct drm_property *tv_left_margin_property;
+ struct drm_property *tv_right_margin_property;
+ struct drm_property *tv_top_margin_property;
+ struct drm_property *tv_bottom_margin_property;
+ struct drm_property *tv_brightness_property;
+ struct drm_property *tv_contrast_property;
+ struct drm_property *tv_flicker_reduction_property;
+ struct drm_property *tv_overscan_property;
+ struct drm_property *tv_saturation_property;
+ struct drm_property *tv_hue_property;
+
+ /* Optional properties */
+ struct drm_property *scaling_mode_property;
+ struct drm_property *dithering_mode_property;
+ struct drm_property *dirty_info_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+};
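+
+/*
+ * Initialization sketch (the size limits and funcs table are hypothetical
+ * driver choices): drm_mode_config_init() is called before any CRTCs,
+ * encoders or connectors are registered, and drm_mode_config_cleanup()
+ * undoes it on teardown; both are declared below.
+ *
+ *	drm_mode_config_init(dev);
+ *	dev->mode_config.min_width = 0;
+ *	dev->mode_config.min_height = 0;
+ *	dev->mode_config.max_width = 8192;
+ *	dev->mode_config.max_height = 8192;
+ *	dev->mode_config.funcs = &example_mode_funcs;
+ *	...
+ *	drm_mode_config_cleanup(dev);
+ */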
+
+#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
+
+extern void drm_modeset_lock_all(struct drm_device *dev);
+extern void drm_modeset_unlock_all(struct drm_device *dev);
+
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+
+extern int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
+
+extern void drm_connector_cleanup(struct drm_connector *connector);
+/* helper to unplug all connectors from sysfs for device */
+extern void drm_connector_unplug_all(struct drm_device *dev);
+
+extern int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type);
+
+extern int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+extern void drm_plane_force_disable(struct drm_plane *plane);
+
+extern void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+extern const char *drm_get_connector_name(const struct drm_connector *connector);
+extern const char *drm_get_connector_status_name(enum drm_connector_status status);
+extern const char *drm_get_dpms_name(int val);
+extern const char *drm_get_dvi_i_subconnector_name(int val);
+extern const char *drm_get_dvi_i_select_name(int val);
+extern const char *drm_get_tv_subconnector_name(int val);
+extern const char *drm_get_tv_select_name(int val);
+extern void drm_fb_release(struct drm_file *file_priv);
+extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
+extern bool drm_probe_ddc(struct i2c_adapter *adapter);
+extern struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
+extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
+extern void drm_mode_config_cleanup(struct drm_device *dev);
+extern void drm_mode_set_name(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
+
+/* for use by the fb module */
+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+extern void drm_mode_list_concat(struct list_head *head,
+ struct list_head *new);
+extern void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch);
+extern void drm_mode_validate_clocks(struct drm_device *dev,
+ struct list_head *mode_list,
+ int *min, int *max, int n_ranges);
+extern void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose);
+extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_hsync(const struct drm_display_mode *mode);
+extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+ int adjust_flags);
+extern void drm_mode_connector_list_update(struct drm_connector *connector);
+extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid);
+extern int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t val);
+extern int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *value);
+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
+extern void drm_framebuffer_set_object(struct drm_device *dev,
+ unsigned long handle);
+extern int drm_framebuffer_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs);
+extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id);
+extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
+extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
+extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
+extern bool drm_crtc_in_use(struct drm_crtc *crtc);
+
+extern void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val);
+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values);
+extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max);
+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+extern int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name);
+extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
+ char *formats[]);
+extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
+
+extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
+extern int drm_mode_set_config_internal(struct drm_mode_set *set);
+/* IOCTLs */
+extern int drm_mode_getresources(DRM_IOCTL_ARGS);
+extern int drm_mode_getplane_res(DRM_IOCTL_ARGS);
+extern int drm_mode_getcrtc(DRM_IOCTL_ARGS);
+extern int drm_mode_getconnector(DRM_IOCTL_ARGS);
+extern int drm_mode_setcrtc(DRM_IOCTL_ARGS);
+extern int drm_mode_getplane(DRM_IOCTL_ARGS);
+extern int drm_mode_setplane(DRM_IOCTL_ARGS);
+extern int drm_mode_cursor_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_cursor2_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_addfb(DRM_IOCTL_ARGS);
+extern int drm_mode_addfb2(DRM_IOCTL_ARGS);
+extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
+extern int drm_mode_rmfb(DRM_IOCTL_ARGS);
+extern int drm_mode_getfb(DRM_IOCTL_ARGS);
+extern int drm_mode_dirtyfb_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_getproperty_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_getblob_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_connector_property_set_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_hotplug_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_replacefb(DRM_IOCTL_ARGS);
+extern int drm_mode_getencoder(DRM_IOCTL_ARGS);
+extern int drm_mode_gamma_get_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_gamma_set_ioctl(DRM_IOCTL_ARGS);
+extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern bool drm_detect_monitor_audio(struct edid *edid);
+extern bool drm_rgb_quant_range_selectable(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(DRM_IOCTL_ARGS);
+extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins);
+extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins);
+extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins, int GTF_M,
+ int GTF_2C, int GTF_K, int GTF_2J);
+extern int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
+
+extern int drm_edid_header_is_valid(const u8 *raw_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
+extern bool drm_edid_is_valid(struct edid *edid);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb);
+
+extern int drm_mode_create_dumb_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_mmap_dumb_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_destroy_dumb_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_obj_get_properties_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mode_obj_set_property_ioctl(DRM_IOCTL_ARGS);
+
+extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp);
+extern int drm_format_num_planes(uint32_t format);
+extern int drm_format_plane_cpp(uint32_t format, int plane);
+extern int drm_format_horz_chroma_subsampling(uint32_t format);
+extern int drm_format_vert_chroma_subsampling(uint32_t format);
+extern const char *drm_get_format_name(uint32_t format);
+
+#endif /* __DRM_CRTC_H__ */
diff --git a/usr/src/uts/common/drm/drm_crtc_helper.h b/usr/src/uts/common/drm/drm_crtc_helper.h
new file mode 100644
index 0000000..af826a2
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_crtc_helper.h
@@ -0,0 +1,176 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The DRM mode setting helper functions are common code for drivers to use if
+ * they wish. Drivers are not forced to use this code in their
+ * implementations, but it would be useful if the code they do use at least
+ * provides a consistent interface and operation to userspace.
+ */
+
+#ifndef __DRM_CRTC_HELPER_H__
+#define __DRM_CRTC_HELPER_H__
+
+#include <sys/types.h>
+#include "drmP.h"
+#include "drm_sun_idr.h"
+
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
+/**
+ * drm_crtc_helper_funcs - helper operations for CRTCs
+ * @mode_fixup: try to fixup proposed mode for this CRTC
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_crtc_helper_funcs {
+ /*
+ * Control power levels on the CRTC. If the mode passed in is
+ * unsupported, the provider must use the next lowest power level.
+ */
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ void (*prepare)(struct drm_crtc *crtc);
+ void (*commit)(struct drm_crtc *crtc);
+
+ /* Provider can fixup or change mode timings before modeset occurs */
+ bool (*mode_fixup)(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /* Actually set the mode */
+ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+ /* Move the crtc on the current fb to the given position *optional* */
+ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+ int (*mode_set_base_atomic)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic);
+
+ /* reload the current crtc LUT */
+ void (*load_lut)(struct drm_crtc *crtc);
+
+ /* disable crtc when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_crtc *crtc);
+};
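+
+/*
+ * Hook-up sketch (the callback names are hypothetical): a driver fills in a
+ * helper table and attaches it with drm_crtc_helper_add(), defined below;
+ * the mid-layer then drives these hooks from drm_crtc_helper_set_config().
+ *
+ *	static const struct drm_crtc_helper_funcs example_crtc_helpers = {
+ *		.dpms = example_crtc_dpms,
+ *		.mode_fixup = example_crtc_mode_fixup,
+ *		.mode_set = example_crtc_mode_set,
+ *		.mode_set_base = example_crtc_mode_set_base,
+ *		.prepare = example_crtc_prepare,
+ *		.commit = example_crtc_commit,
+ *	};
+ *
+ *	drm_crtc_helper_add(crtc, &example_crtc_helpers);
+ */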
+
+/**
+ * drm_encoder_helper_funcs - helper operations for encoders
+ * @mode_fixup: try to fixup proposed mode for this encoder
+ * @mode_set: set this mode
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_encoder_helper_funcs {
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+ void (*save)(struct drm_encoder *encoder);
+ void (*restore)(struct drm_encoder *encoder);
+
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*prepare)(struct drm_encoder *encoder);
+ void (*commit)(struct drm_encoder *encoder);
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
+ /* detect for DAC style encoders */
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ /* disable encoder when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_encoder *encoder);
+};
+
+/**
+ * drm_connector_helper_funcs - helper operations for connectors
+ * @get_modes: get mode list for this connector
+ * @mode_valid: is this mode valid on the given connector?
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_connector_helper_funcs {
+ int (*get_modes)(struct drm_connector *connector);
+ int (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+ struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+};
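+
+/*
+ * Sketch of a typical get_modes() hook (the i2c adapter is hypothetical
+ * driver state, and freeing of the returned EDID is omitted for brevity;
+ * drm_get_edid(), drm_mode_connector_update_edid_property() and
+ * drm_add_edid_modes() are declared in drm_crtc.h):
+ *
+ *	static int example_get_modes(struct drm_connector *connector)
+ *	{
+ *		struct edid *edid;
+ *		int count = 0;
+ *
+ *		edid = drm_get_edid(connector, &example_adapter);
+ *		if (edid != NULL) {
+ *			(void) drm_mode_connector_update_edid_property(
+ *			    connector, edid);
+ *			count = drm_add_edid_modes(connector, edid);
+ *		}
+ *		return (count);
+ *	}
+ */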
+
+extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
+extern void drm_helper_disable_unused_functions(struct drm_device *dev);
+extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
+extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
+
+extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+ const struct drm_crtc_helper_funcs *funcs)
+{
+ crtc->helper_private = (void *)funcs;
+}
+
+static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
+ const struct drm_encoder_helper_funcs *funcs)
+{
+ encoder->helper_private = (void *)funcs;
+}
+
+static inline void drm_connector_helper_add(struct drm_connector *connector,
+ const struct drm_connector_helper_funcs *funcs)
+{
+ connector->helper_private = (void *)funcs;
+}
+
+extern void drm_helper_resume_force_mode(struct drm_device *dev);
+extern void drm_kms_helper_poll_init(struct drm_device *dev);
+extern void drm_kms_helper_poll_fini(struct drm_device *dev);
+extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
+
+extern void drm_kms_helper_poll_disable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+
+#endif /* __DRM_CRTC_HELPER_H__ */
diff --git a/usr/src/uts/common/drm/drm_dp_helper.h b/usr/src/uts/common/drm/drm_dp_helper.h
new file mode 100644
index 0000000..55c8e85
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_dp_helper.h
@@ -0,0 +1,368 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+#include "drm_linux.h"
+#include "drm_sun_i2c.h"
+
+/*
+ * Unless otherwise noted, all values are from the DP 1.1a spec. Note that
+ * DP and DPCD versions are independent. Differences from 1.0 are not noted,
+ * since 1.0 devices basically don't exist in the wild.
+ *
+ * Abbreviations, in chronological order:
+ *
+ * eDP: Embedded DisplayPort version 1
+ * DPI: DisplayPort Interoperability Guideline v1.1a
+ * 1.2: DisplayPort 1.2
+ *
+ * 1.2 formally includes both eDP and DPI definitions.
+ */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
+
+/* AUX CH addresses */
+/* DPCD */
+#define DP_DPCD_REV 0x000
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_TPS3_SUPPORTED (1 << 6)
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+/* 00b = DisplayPort */
+/* 01b = Analog */
+/* 10b = TMDS or HDMI */
+/* 11b = Other */
+# define DP_FORMAT_CONVERSION (1 << 3)
+# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+
+#define DP_DOWN_STREAM_PORT_COUNT 0x007
+# define DP_PORT_COUNT_MASK 0x0f
+# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
+# define DP_OUI_SUPPORT (1 << 7)
+
+#define DP_I2C_SPEED_CAP 0x00c /* DPI */
+# define DP_I2C_SPEED_1K 0x01
+# define DP_I2C_SPEED_5K 0x02
+# define DP_I2C_SPEED_10K 0x04
+# define DP_I2C_SPEED_100K 0x08
+# define DP_I2C_SPEED_400K 0x10
+# define DP_I2C_SPEED_1M 0x20
+
+#define DP_EDP_CONFIGURATION_CAP 0x00d
+#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
+
+/* Multiple stream transport */
+#define DP_MSTM_CAP 0x021 /* 1.2 */
+# define DP_MST_CAP (1 << 0)
+
+#define DP_PSR_SUPPORT 0x070
+# define DP_PSR_IS_SUPPORTED 1
+#define DP_PSR_CAPS 0x071
+# define DP_PSR_NO_TRAIN_ON_EXIT 1
+# define DP_PSR_SETUP_TIME_330 (0 << 1)
+# define DP_PSR_SETUP_TIME_275 (1 << 1)
+# define DP_PSR_SETUP_TIME_220 (2 << 1)
+# define DP_PSR_SETUP_TIME_165 (3 << 1)
+# define DP_PSR_SETUP_TIME_110 (4 << 1)
+# define DP_PSR_SETUP_TIME_55 (5 << 1)
+# define DP_PSR_SETUP_TIME_0 (6 << 1)
+# define DP_PSR_SETUP_TIME_MASK (7 << 1)
+# define DP_PSR_SETUP_TIME_SHIFT 1
+
+/*
+ * 0x80-0x8f describe downstream port capabilities, but there are two layouts
+ * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
+ * each port's descriptor is one byte wide. If it was set, each descriptor is
+ * four bytes wide, starting with the one byte from the base info. As of
+ * DP interop v1.1a only VGA defines additional detail.
+ */
+
+/* offset 0 */
+#define DP_DOWNSTREAM_PORT_0 0x80
+# define DP_DS_PORT_TYPE_MASK (7 << 0)
+# define DP_DS_PORT_TYPE_DP 0
+# define DP_DS_PORT_TYPE_VGA 1
+# define DP_DS_PORT_TYPE_DVI 2
+# define DP_DS_PORT_TYPE_HDMI 3
+# define DP_DS_PORT_TYPE_NON_EDID 4
+# define DP_DS_PORT_HPD (1 << 3)
+/* offset 1 for VGA is maximum megapixels per second / 8 */
+/* offset 2 */
+# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
+# define DP_DS_VGA_8BPC 0
+# define DP_DS_VGA_10BPC 1
+# define DP_DS_VGA_12BPC 2
+# define DP_DS_VGA_16BPC 3
+
+/* link configuration */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+# define DP_LINK_BW_5_4 0x14
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_3 3
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
+/* bitmask as for DP_I2C_SPEED_CAP */
+
+#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
+
+#define DP_MSTM_CTRL 0x111 /* 1.2 */
+# define DP_MST_EN (1 << 0)
+# define DP_UP_REQ_EN (1 << 1)
+# define DP_UPSTREAM_IS_SRC (1 << 2)
+
+#define DP_PSR_EN_CFG 0x170
+# define DP_PSR_ENABLE (1 << 0)
+# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
+# define DP_PSR_CRC_VERIFICATION (1 << 2)
+# define DP_PSR_FRAME_CAPTURE (1 << 3)
+
+#define DP_SINK_COUNT 0x200
+/* prior to 1.2, bit 7 was reserved (must be zero) */
+# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
+# define DP_SINK_CP_READY (1 << 6)
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
+# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
+# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
+# define DP_CP_IRQ (1 << 2)
+# define DP_SINK_SPECIFIC_IRQ (1 << 6)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+ DP_LANE_CHANNEL_EQ_DONE | \
+ DP_LANE_SYMBOL_LOCKED)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+#define DP_TEST_REQUEST 0x218
+# define DP_TEST_LINK_TRAINING (1 << 0)
+# define DP_TEST_LINK_PATTERN (1 << 1)
+# define DP_TEST_LINK_EDID_READ (1 << 2)
+# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
+
+#define DP_TEST_LINK_RATE 0x219
+# define DP_LINK_RATE_162 (0x6)
+# define DP_LINK_RATE_27 (0xa)
+
+#define DP_TEST_LANE_COUNT 0x220
+
+#define DP_TEST_PATTERN 0x221
+
+#define DP_TEST_RESPONSE 0x260
+# define DP_TEST_ACK (1 << 0)
+# define DP_TEST_NAK (1 << 1)
+# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+
+#define DP_SOURCE_OUI 0x300
+#define DP_SINK_OUI 0x400
+#define DP_BRANCH_OUI 0x500
+
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+
+#define DP_PSR_ERROR_STATUS 0x2006
+# define DP_PSR_LINK_CRC_ERROR (1 << 0)
+# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+
+#define DP_PSR_ESI 0x2007
+# define DP_PSR_CAPS_CHANGE (1 << 0)
+
+#define DP_PSR_STATUS 0x2008
+# define DP_PSR_SINK_INACTIVE 0
+# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
+# define DP_PSR_SINK_ACTIVE_RFB 2
+# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
+# define DP_PSR_SINK_ACTIVE_RESYNC 4
+# define DP_PSR_SINK_INTERNAL_ERROR 7
+# define DP_PSR_SINK_STATE_MASK 0x07
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ * aux algorithm
+ * @running: set by the algo indicating whether an i2c transfer is ongoing or
+ * whether the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
+struct i2c_algo_dp_aux_data {
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_adapter *adapter,
+ int mode, uint8_t write_byte,
+ uint8_t *read_byte);
+};
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+
+#define DP_LINK_STATUS_SIZE 6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+
+#define DP_RECEIVER_CAP_SIZE 0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+int drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
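+
+/*
+ * Usage sketch (assumes dpcd[] already holds the first DP_RECEIVER_CAP_SIZE
+ * bytes read from DPCD address 0x000 over the AUX channel):
+ *
+ *	max_lanes = drm_dp_max_lane_count(dpcd);
+ *	max_rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+ *	enhanced = (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP) != 0;
+ */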
+
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/usr/src/uts/common/drm/drm_edid.h b/usr/src/uts/common/drm/drm_edid.h
new file mode 100644
index 0000000..4622515
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_edid.h
@@ -0,0 +1,295 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_EDID_H__
+#define __DRM_EDID_H__
+
+#include "drm.h"
+#include "drmP.h"
+#define EDID_LENGTH 128
+#define DDC_ADDR 0x50
+
+#define CEA_EXT 0x02
+#define VTB_EXT 0x10
+#define DI_EXT 0x40
+#define LS_EXT 0x50
+#define MI_EXT 0x60
+
+#pragma pack(1)
+struct est_timings {
+ u8 t1;
+ u8 t2;
+ u8 mfg_rsvd;
+} __attribute__((packed));
+#pragma pack()
+
+/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+#define EDID_TIMING_ASPECT_SHIFT 6
+#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
+
+/* need to add 60 */
+#define EDID_TIMING_VFREQ_SHIFT 0
+#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
+
+#pragma pack(1)
+struct std_timing {
+ u8 hsize; /* need to multiply by 8 then add 248 */
+ u8 vfreq_aspect;
+} __attribute__((packed));
+#pragma pack()
+
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
+#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
+#define DRM_EDID_PT_STEREO (1 << 5)
+#define DRM_EDID_PT_INTERLACED (1 << 7)
+
+/* If detailed data is pixel timing */
+#pragma pack(1)
+struct detailed_pixel_timing {
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hactive_hblank_hi;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vactive_vblank_hi;
+ u8 hsync_offset_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_offset_pulse_width_lo;
+ u8 hsync_vsync_offset_pulse_width_hi;
+ u8 width_mm_lo;
+ u8 height_mm_lo;
+ u8 width_height_mm_hi;
+ u8 hborder;
+ u8 vborder;
+ u8 misc;
+} __attribute__((packed));
+#pragma pack()
+
+/* If it's not pixel timing, it'll be one of the below */
+#pragma pack(1)
+struct detailed_data_string {
+ u8 str[13];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct detailed_data_monitor_range {
+ u8 min_vfreq;
+ u8 max_vfreq;
+ u8 min_hfreq_khz;
+ u8 max_hfreq_khz;
+ u8 pixel_clock_mhz; /* need to multiply by 10 */
+ u8 flags;
+ union {
+ struct {
+ u8 reserved;
+ u8 hfreq_start_khz; /* need to multiply by 2 */
+ u8 c; /* need to divide by 2 */
+ __u16 m;
+ u8 k;
+ u8 j; /* need to divide by 2 */
+ } __attribute__((packed)) gtf2;
+ struct {
+ u8 version;
+ u8 data1; /* high 6 bits: extra clock resolution */
+ u8 data2; /* plus low 2 of above: max hactive */
+ u8 supported_aspects;
+ u8 flags; /* preferred aspect and blanking support */
+ u8 supported_scalings;
+ u8 preferred_refresh;
+ } __attribute__((packed)) cvt;
+ } formula;
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct detailed_data_wpindex {
+ u8 white_yx_lo; /* Lower 2 bits each */
+ u8 white_x_hi;
+ u8 white_y_hi;
+ u8 gamma; /* need to divide by 100 then add 1 */
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct detailed_data_color_point {
+ u8 windex1;
+ u8 wpindex1[3];
+ u8 windex2;
+ u8 wpindex2[3];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct cvt_timing {
+ u8 code[3];
+} __attribute__((packed));
+#pragma pack()
+
+#pragma pack(1)
+struct detailed_non_pixel {
+ u8 pad1;
+ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
+ fb=color point data, fa=standard timing data,
+ f9=undefined, f8=mfg. reserved */
+ u8 pad2;
+ union {
+ struct detailed_data_string str;
+ struct detailed_data_monitor_range range;
+ struct detailed_data_wpindex color;
+ struct std_timing timings[5];
+ struct cvt_timing cvt[4];
+ } data;
+} __attribute__((packed));
+#pragma pack()
+
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
+#define EDID_DETAIL_STD_MODES 0xfa
+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
+#define EDID_DETAIL_MONITOR_NAME 0xfc
+#define EDID_DETAIL_MONITOR_RANGE 0xfd
+#define EDID_DETAIL_MONITOR_STRING 0xfe
+#define EDID_DETAIL_MONITOR_SERIAL 0xff
+
+#pragma pack(1)
+struct detailed_timing {
+ __u16 pixel_clock; /* need to multiply by 10 KHz */
+ union {
+ struct detailed_pixel_timing pixel_data;
+ struct detailed_non_pixel other_data;
+ } data;
+} __attribute__((packed));
+#pragma pack()
+
+#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
+#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
+#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
+#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
+#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
+#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
+#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */
+#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
+#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
+#define DRM_EDID_DIGITAL_TYPE_DVI (1)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
+#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
+#define DRM_EDID_DIGITAL_TYPE_DP (5)
+
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
+#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
+#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
+/* If digital */
+#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
+#define DRM_EDID_FEATURE_RGB (0 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
+
+#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
+#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
+#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
+
+#pragma pack(1)
+struct edid {
+ u8 header[8];
+ /* Vendor & product info */
+ u8 mfg_id[2];
+ u8 prod_code[2];
+ __u32 serial; /* FIXME: byte order */
+ u8 mfg_week;
+ u8 mfg_year;
+ /* EDID version */
+ u8 version;
+ u8 revision;
+ /* Display info: */
+ u8 input;
+ u8 width_cm;
+ u8 height_cm;
+ u8 gamma;
+ u8 features;
+ /* Color characteristics */
+ u8 red_green_lo;
+ u8 black_white_lo;
+ u8 red_x;
+ u8 red_y;
+ u8 green_x;
+ u8 green_y;
+ u8 blue_x;
+ u8 blue_y;
+ u8 white_x;
+ u8 white_y;
+	/* Est. timings and mfg rsvd timings */
+ struct est_timings established_timings;
+ /* Standard timings 1-8*/
+ struct std_timing standard_timings[8];
+	/* Detailed timings 1-4 */
+ struct detailed_timing detailed_timings[4];
+ /* Number of 128 byte ext. blocks */
+ u8 extensions;
+ /* Checksum */
+ u8 checksum;
+} __attribute__((packed));
+#pragma pack()
+
+#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
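+
+/*
+ * Validity sketch: an EDID base block is well formed when its 8 byte header
+ * matches the fixed pattern and all EDID_LENGTH bytes, including the trailing
+ * checksum byte, sum to zero modulo 256:
+ *
+ *	u8 csum = 0;
+ *	for (i = 0; i < EDID_LENGTH; i++)
+ *		csum += raw_edid[i];
+ *	block_ok = (csum == 0);
+ */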
+
+/* maximum number of EDID extension blocks */
+#define DRM_MAX_EDID_EXT_NUM 4
+
+/* Short Audio Descriptor */
+struct cea_sad {
+ u8 format;
+ u8 channels; /* max number of channels - 1 */
+ u8 freq;
+ u8 byte2; /* meaning depends on format */
+};
+
+struct drm_encoder;
+struct drm_connector;
+struct drm_display_mode;
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+
+#endif /* __DRM_EDID_H__ */
diff --git a/usr/src/uts/common/drm/drm_fb_helper.h b/usr/src/uts/common/drm/drm_fb_helper.h
new file mode 100644
index 0000000..ec8d330
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_fb_helper.h
@@ -0,0 +1,121 @@
+/* BEGIN CSTYLED */
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008, 2012, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#ifndef _DRM_FB_HELPER_H
+#define _DRM_FB_HELPER_H
+
+struct drm_fb_helper;
+
+struct drm_fb_helper_crtc {
+ struct drm_mode_set mode_set;
+ struct drm_display_mode *desired_mode;
+};
+
+struct drm_fb_helper_surface_size {
+ u32 fb_width;
+ u32 fb_height;
+ u32 surface_width;
+ u32 surface_height;
+ u32 surface_bpp;
+ u32 surface_depth;
+};
+
+/**
+ * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
+ * @gamma_set: - Set the given gamma lut register on the given crtc.
+ * @gamma_get: - Read the given gamma lut register on the given crtc, used to
+ * save the current lut when force-restoring the fbdev for e.g.
+ * kdbg.
+ * @fb_probe: - Driver callback to allocate and initialize the fbdev info
+ * structure. Furthermore it also needs to allocate the drm
+ * framebuffer used to back the fbdev.
+ *
+ * Driver callbacks used by the fbdev emulation helper library.
+ */
+struct drm_fb_helper_funcs {
+ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ int (*fb_probe)(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+ bool (*initial_config)(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height);
+};
+
+struct drm_fb_helper_connector {
+ struct drm_connector *connector;
+ struct drm_cmdline_mode cmdline_mode;
+};
+
+struct drm_fb_helper {
+ struct drm_framebuffer *fb;
+ struct drm_device *dev;
+ int crtc_count;
+ struct drm_fb_helper_crtc *crtc_info;
+ int connector_count;
+ struct drm_fb_helper_connector **connector_info;
+ struct drm_fb_helper_funcs *funcs;
+ struct fb_info *fbdev;
+ u32 pseudo_palette[17];
+ struct list_head kernel_fb_list;
+
+	/* we got a hotplug but fbdev wasn't running the console;
+	   delay until next set_par */
+ bool delayed_hotplug;
+};
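+
+/*
+ * Setup sketch (the CRTC count, connector limit and bpp are hypothetical):
+ * the usual fbdev emulation sequence uses the helpers declared below.
+ *
+ *	ret = drm_fb_helper_init(dev, fb_helper, 2, 4);
+ *	if (ret == 0) {
+ *		(void) drm_fb_helper_single_add_all_connectors(fb_helper);
+ *		(void) drm_fb_helper_initial_config(fb_helper, 32);
+ *	}
+ *	...
+ *	drm_fb_helper_fini(fb_helper);
+ */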
+
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
+ int preferred_bpp);
+
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn);
+void drm_fb_helper_fini(struct drm_fb_helper *helper);
+
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_force_kernel_mode(void);
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth);
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+void drm_register_fbops(struct drm_device *dev);
+int drm_getfb_size(struct drm_device *dev);
+#endif /* _DRM_FB_HELPER_H */
diff --git a/usr/src/uts/common/drm/drm_fourcc.h b/usr/src/uts/common/drm/drm_fourcc.h
new file mode 100644
index 0000000..7af2285
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_fourcc.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef DRM_FOURCC_H
+#define DRM_FOURCC_H
+
+#include <inttypes.h>
+
+#define fourcc_code(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
+ ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
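+
+/*
+ * Example: DRM_FORMAT_XRGB8888 below is fourcc_code('X', 'R', '2', '4'),
+ * i.e. the little-endian packing of the ASCII string "XR24":
+ *
+ *	0x58 | (0x52 << 8) | (0x32 << 16) | (0x34 << 24) == 0x34325258
+ */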
+
+#define DRM_FORMAT_BIG_ENDIAN (1UL<<31) /* format is big endian instead of little endian */
+
+/* color index */
+#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
+
+/* 8 bpp RGB */
+#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
+#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
+
+/* 16 bpp RGB */
+#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
+
+#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
+#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
+#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
+#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
+
+#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
+
+#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
+#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
+#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
+#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
+
+#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
+#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
+
+/* 24 bpp RGB */
+#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
+#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
+
+/* 32 bpp RGB */
+#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
+
+#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
+#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
+#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
+#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
+
+#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
+
+#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
+#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
+#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
+#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+
+/* packed YCbCr */
+#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
+#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
+#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
+
+#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [7:0] Y
+ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
+ * or
+ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
+ */
+#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
+#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
+
+/*
+ * 3 plane YCbCr
+ * index 0: Y plane, [7:0] Y
+ * index 1: Cb plane, [7:0] Cb
+ * index 2: Cr plane, [7:0] Cr
+ * or
+ * index 1: Cr plane, [7:0] Cr
+ * index 2: Cb plane, [7:0] Cb
+ */
+#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
+#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
+#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
+#define DRM_FORMAT_MOD_VENDOR_NV 0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
+/* add more to the end as needed */
+
+#define fourcc_mod_code(vendor, val) \
+ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ */
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2, where the tiles are
+ * 2Kb) in row-major layout. Within the tile, bytes are laid out row-major,
+ * with a platform-dependent stride. On top of that the memory can apply
+ * platform-dependent swizzling of some higher address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2, where the tiles are
+ * 2Kb) in row-major layout. Within the tile, bytes are laid out in OWORD
+ * (16 byte) chunks column-major, with a platform-dependent height. On top of
+ * that the memory can apply platform-dependent swizzling of some higher
+ * address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major order.
+ * Within the tile, pixels are laid out in sixteen 256 byte units / sub-tiles,
+ * which are arranged in four groups (two wide, two high) with column-major
+ * layout. Each group therefore consists of four 256 byte units, which are
+ * also laid out as 2x2 column-major.
+ * 256 byte units are made out of four 64 byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the width
+ * in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
+/*
+ * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
+ *
+ * Macroblocks are laid out in a Z-shape, and the pixel data within each
+ * macroblock follows the standard NV12 layout.
+ * As for NV12, an image is the result of two frame buffers: one for Y,
+ * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
+ * Alignment requirements are (for each buffer):
+ * - multiple of 128 pixels for the width
+ * - multiple of 32 pixels for the height
+ *
+ * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
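To make the NV12MT alignment rule concrete, a minimal hedged sketch (ALIGN_UP and the 1366x768 geometry are illustrative, not part of the header):

#include <stdio.h>

/* Round x up to the next multiple of a (a must be non-zero). */
#define ALIGN_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int
main(void)
{
	unsigned int width = 1366, height = 768;
	unsigned int aligned_w = ALIGN_UP(width, 128);	/* multiple of 128 pixels */
	unsigned int aligned_h = ALIGN_UP(height, 32);	/* multiple of 32 lines */

	/*
	 * Prints 1366x768 -> 1408x768 for the Y buffer; the Cb/Cr buffer is
	 * half the height but follows the same alignment rule.
	 */
	(void) printf("%ux%u -> %ux%u\n", width, height, aligned_w, aligned_h);
	return (0);
}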
+
+#endif /* DRM_FOURCC_H */
diff --git a/usr/src/uts/common/io/drm/drm_io32.h b/usr/src/uts/common/drm/drm_io32.h
index e710697..d406b81 100644
--- a/usr/src/uts/common/io/drm/drm_io32.h
+++ b/usr/src/uts/common/drm/drm_io32.h
@@ -1,27 +1,28 @@
/*
- * CDDL HEADER START
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
*
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
#ifndef _DRM_IO32_H_
@@ -81,19 +82,13 @@ typedef struct drm_stats_32 {
typedef struct drm_buf_desc_32 {
- int count; /* Number of buffers of this size */
- int size; /* Size in bytes */
- int low_mark; /* Low water mark */
- int high_mark; /* High water mark */
+ int count; /* Number of buffers of this size */
+ int size; /* Size in bytes */
+ int low_mark; /* Low water mark */
+ int high_mark; /* High water mark */
drm_buf_flag flags;
-
- /*
- * Start address of where the AGP buffers are
- * in the AGP aperture
- */
uint32_t agp_start;
-
-}drm_buf_desc_32_t;
+} drm_buf_desc_32_t;
typedef struct drm_buf_free_32 {
int count;
@@ -182,6 +177,29 @@ typedef union drm_wait_vblank_32 {
} drm_wait_vblank_32_t;
+extern int copyin32_drm_map(void *dest, void *src);
+extern int copyout32_drm_map(void *dest, void *src);
+extern int copyin32_drm_buf_desc(void * dest, void * src);
+extern int copyout32_drm_buf_desc(void * dest, void * src);
+extern int copyin32_drm_buf_free(void * dest, void * src);
+extern int copyin32_drm_buf_map(void * dest, void * src);
+extern int copyout32_drm_buf_map(void * dest, void * src);
+extern int copyin32_drm_ctx_priv_map(void * dest, void * src);
+extern int copyout32_drm_ctx_priv_map(void * dest, void * src);
+extern int copyin32_drm_ctx_res(void * dest, void * src);
+extern int copyout32_drm_ctx_res(void * dest, void * src);
+extern int copyin32_drm_unique(void * dest, void * src);
+extern int copyout32_drm_unique(void * dest, void * src);
+extern int copyin32_drm_client(void * dest, void * src);
+extern int copyout32_drm_client(void * dest, void * src);
+extern int copyout32_drm_stats(void * dest, void * src);
+extern int copyin32_drm_version(void * dest, void * src);
+extern int copyout32_drm_version(void * dest, void * src);
+extern int copyin32_drm_wait_vblank(void * dest, void * src);
+extern int copyout32_drm_wait_vblank(void * dest, void * src);
+extern int copyin32_drm_scatter_gather(void * dest, void * src);
+extern int copyout32_drm_scatter_gather(void * dest, void * src);
+
#endif /* _MULTI_DATAMODEL */
#endif /* _DRM_IO32_H_ */
diff --git a/usr/src/uts/common/drm/drm_linux.h b/usr/src/uts/common/drm/drm_linux.h
new file mode 100644
index 0000000..02baf92
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_linux.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012, 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_LINUX_H__
+#define __DRM_LINUX_H__
+
+#include <sys/types.h>
+#include <sys/byteorder.h>
+#include "drm_atomic.h"
+
+#define DRM_MEM_CACHED 0
+#define DRM_MEM_UNCACHED 1
+#define DRM_MEM_WC 2
+
+#ifndef min
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef max
+#define max(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#define clamp_int64_t(val) \
+ val = min((int64_t)INT_MAX, val); \
+ val = max((int64_t)INT_MIN, val);
+
+#define ioremap_wc(base,size) drm_sun_ioremap((base), (size), DRM_MEM_WC)
+#define ioremap(base, size) drm_sun_ioremap((base), (size), DRM_MEM_UNCACHED)
+#define iounmap(addr) drm_sun_iounmap((addr))
+
+#define spinlock_t kmutex_t
+#define spin_lock_init(l) mutex_init((l), NULL, MUTEX_DRIVER, NULL);
+#define spin_lock(l) mutex_enter(l)
+#define spin_unlock(u) mutex_exit(u)
+#define spin_lock_irq(l) mutex_enter(l)
+#define spin_unlock_irq(u) mutex_exit(u)
+#ifdef __lint
+/*
+ * The following is to keep lint happy when it encounters the use of 'flag'.
+ * On Linux, this allows a local variable to be used to retain context,
+ * but it is unused on Solaris. Rather than trying to place LINTED
+ * directives in the source, we actually consume the flag for lint here.
+ */
+#define spin_lock_irqsave(l, flag) flag = 0; mutex_enter(l)
+#define spin_unlock_irqrestore(u, flag) flag &= flag; mutex_exit(u)
+#else
+#define spin_lock_irqsave(l, flag) mutex_enter(l)
+#define spin_unlock_irqrestore(u, flag) mutex_exit(u)
+#endif
+
+#define mutex_lock(l) mutex_enter(l)
+#define mutex_unlock(u) mutex_exit(u)
+#define mutex_is_locked(l) mutex_owned(l)
+
+#define assert_spin_locked(l) ASSERT(MUTEX_HELD(l))
+
+#define kmalloc kmem_alloc
+#define kzalloc kmem_zalloc
+#define kcalloc(x, y, z) kzalloc((x)*(y), z)
+#define kfree kmem_free
+
+#define do_gettimeofday (void) uniqtime
+#define msleep_interruptible(s) DRM_UDELAY(s)
+#define timeval_to_ns(tvp) TICK_TO_NSEC(TIMEVAL_TO_TICK(tvp))
+#define ns_to_timeval(nsec, tvp) TICK_TO_TIMEVAL(NSEC_TO_TICK(nsec), tvp)
+
+#define GFP_KERNEL KM_SLEEP
+#define GFP_ATOMIC KM_SLEEP
+
+#define KHZ2PICOS(a) (1000000000UL/(a))
+
+#define udelay drv_usecwait
+#define mdelay(x) udelay((x) * 1000)
+#define msleep(x) mdelay((x))
+#define msecs_to_jiffies(x) drv_usectohz((x) * 1000)
+#define jiffies_to_msecs(x)	(drv_hztousec(x) / 1000)
+#define time_after(a,b) ((long)(b) - (long)(a) < 0)
+#define time_after_eq(a,b) ((long)(a) - (long)(b) >= 0)
+#define time_before_eq(a,b) time_after_eq(b,a)
+#define time_in_range(a,b,c) \
+ (time_after_eq(a,b) && \
+ time_before_eq(a,c))
+
+#define jiffies ddi_get_lbolt()
+
+#ifdef _BIG_ENDIAN
+#define cpu_to_le16(x) LE_16(x)
+#define le16_to_cpu(x) LE_16(x)
+#else
+#define cpu_to_le16(x) (x)
+#define le16_to_cpu(x) (x)
+#endif
+
+#define swap(a, b) \
+ do { int tmp = (a); (a) = (b); (b) = tmp; } while (__lintzero)
+
+#define abs(x) (((x) < 0) ? -(x) : (x))
+
+#define div_u64(x, y) (((unsigned long long)(x)) / ((unsigned long long)(y))) /* XXX FIXME */
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+
+#define put_user(val,ptr) DRM_COPY_TO_USER(ptr,(&val),sizeof(val))
+#define get_user(x,ptr) DRM_COPY_FROM_USER((&x),ptr,sizeof(x))
+#define copy_to_user DRM_COPY_TO_USER
+#define copy_from_user DRM_COPY_FROM_USER
+#define unlikely(a) (a)
+
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+
+#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))
+
+#define page_to_phys(x) *(uint32_t *)(uintptr_t)(x)
+#define in_dbg_master() 0
+
+#define BITS_PER_BYTE 8
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define POS_DIV_ROUND_CLOSEST(x, d) ((x + (d / 2)) / d)
+#define POS_DIV_ROUND_UP_ULL(x, d) DIV_ROUND_UP(x,d)
+
+typedef unsigned long dma_addr_t;
+typedef uint64_t u64;
+typedef int64_t s64;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+typedef uint_t irqreturn_t;
+
+typedef int bool;
+
+#define true (1)
+#define false (0)
+
+#define __init
+#define __exit
+#define __iomem
+
+#ifdef _ILP32
+typedef u32 resource_size_t;
+#else /* _LP64 */
+typedef u64 resource_size_t;
+#endif
+
+typedef struct kref {
+ atomic_t refcount;
+} kref_t;
+
+extern void kref_init(struct kref *kref);
+extern void kref_get(struct kref *kref);
+extern void kref_put(struct kref *kref, void (*release)(struct kref *kref));
+
+extern unsigned int hweight16(unsigned int w);
+
+extern long IS_ERR(const void *ptr);
+#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))
+
+#ifdef __lint
+/*
+ * The actual code for _wait_for() causes Solaris lint2 to fail, though
+ * by all appearances the code actually works (peeking at the compiled
+ * code may explain why). So to get around the problem, we create a
+ * special lint version of _wait_for().
+ */
+#define _wait_for(COND, MS, W) (! (COND))
+#else /* !__lint */
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W) udelay(W); \
+ } \
+ ret__; \
+})
+#endif /* __lint */
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+
+#endif /* __DRM_LINUX_H__ */
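A hedged userland sketch of the polling pattern _wait_for() implements; clock_gettime() and usleep() stand in for the jiffies/udelay shims above and are not what the kernel macro uses:

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <errno.h>

/* Poll *flag every `us` microseconds for at most `ms` milliseconds. */
static int
wait_for_flag(volatile int *flag, long ms, unsigned int us)
{
	struct timespec start, now;

	(void) clock_gettime(CLOCK_MONOTONIC, &start);
	while (!*flag) {
		(void) clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
		    (now.tv_nsec - start.tv_nsec) / 1000000L;
		if (elapsed_ms > ms)
			return (-ETIMEDOUT);	/* same failure code as wait_for() */
		if (us)
			(void) usleep(us);
	}
	return (0);
}

int
main(void)
{
	volatile int ready = 1;		/* condition already true, so this returns 0 */

	(void) printf("wait_for -> %d\n", wait_for_flag(&ready, 10, 1000));
	return (0);
}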
diff --git a/usr/src/uts/common/drm/drm_linux_list.h b/usr/src/uts/common/drm/drm_linux_list.h
new file mode 100644
index 0000000..756d29f
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_linux_list.h
@@ -0,0 +1,165 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * drm_linux_list.h -- linux list functions for the BSDs.
+ * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
+ */
+/*
+ * -
+ * Copyright 2003 Eric Anholt
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+#ifndef _DRM_LINUX_LIST_H_
+#define _DRM_LINUX_LIST_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+struct list_head {
+ struct list_head *next, *prev;
+ caddr_t contain_ptr;
+};
+
+/* Cheat, assume the list_head is at the start of the struct */
+#define container_of(ptr, type, member) \
+ ((type *)(uintptr_t)((char *)(ptr) - (unsigned long)(&((type *)0)->member)))
+
+#define list_entry(ptr, type, member) \
+ ptr ? ((type *)(uintptr_t)(ptr->contain_ptr)) : NULL
+
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+#define list_empty(head) \
+ ((head)->next == head)
+
+#define INIT_LIST_HEAD(head) \
+do { \
+ (head)->next = head; \
+ (head)->prev = head; \
+ (head)->contain_ptr = NULL; \
+} while (*"\0")
+
+#define list_add(ptr, head, entry) \
+do { \
+ struct list_head *n_node = (head)->next; \
+ (ptr)->prev = head; \
+ (ptr)->next = (head)->next; \
+ n_node->prev = ptr; \
+ (head)->next = ptr; \
+ (ptr)->contain_ptr = entry; \
+} while (*"\0")
+
+#define list_add_tail(ptr, head, entry) \
+do { \
+ struct list_head *p_node = (head)->prev; \
+ (ptr)->prev = (head)->prev; \
+ (ptr)->next = head; \
+ p_node->next = ptr; \
+ (head)->prev = ptr; \
+ (ptr)->contain_ptr = entry; \
+} while (*"\0")
+
+#define list_del(ptr) \
+do { \
+ struct list_head *n_node = (ptr)->next; \
+ struct list_head *p_node = (ptr)->prev; \
+ n_node->prev = (ptr)->prev; \
+ p_node->next = (ptr)->next; \
+ (ptr)->prev = NULL; \
+ (ptr)->next = NULL; \
+} while (*"\0")
+
+#define list_del_init(ptr) \
+do { \
+ list_del(ptr); \
+ INIT_LIST_HEAD(ptr); \
+} while (*"\0")
+
+#define list_move(ptr, head, entry) \
+do { \
+ list_del(ptr); \
+ list_add(ptr, head, entry); \
+} while (*"\0")
+
+
+#define list_move_tail(ptr, head, entry) \
+do { \
+ list_del(ptr); \
+ list_add_tail(ptr, head, entry); \
+} while (*"\0")
+
+#define list_splice(list, be, ne) \
+do { \
+ if (!list_empty(list)) { \
+ struct list_head *first = (list)->next; \
+ struct list_head *last = (list)->prev; \
+ first->prev = be; \
+ (be)->next = first; \
+ last->next = ne; \
+ (ne)->prev = last; \
+ } \
+} while (*"\0")
+
+#define list_replace(old, new) \
+do { \
+ struct list_head *old_list = old; \
+ struct list_head *new_list = new; \
+ new_list->next = old_list->next; \
+ new_list->next->prev = new_list; \
+ new_list->prev = old_list->prev; \
+ new_list->prev->next = new_list; \
+} while (*"\0")
+
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != head; pos = (pos)->next)
+
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = (pos)->next; \
+ pos != head; \
+ pos = n, n = n->next)
+
+#define list_for_each_entry(pos, type, head, member) \
+ for (pos = list_entry((head)->next, type, member); pos; \
+ pos = list_entry(pos->member.next, type, member))
+
+#define list_for_each_entry_safe(pos, n, type, head, member) \
+ for (pos = list_entry((head)->next, type, member), \
+ n = pos ? list_entry(pos->member.next, type, member) : pos; \
+ pos; \
+ pos = n, \
+ n = list_entry((n ? n->member.next : (head)->next), type, member))
+
+#define list_for_each_entry_continue_reverse(pos, type, head, member) \
+ for (pos = list_entry(pos->member.prev, type, member); \
+ pos; \
+ pos = list_entry(pos->member.prev, type, member))
+#endif /* _DRM_LINUX_LIST_H_ */
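A hedged usage sketch of this port's list flavor: unlike the Linux macros, list_add_tail() takes a third argument that is stashed in contain_ptr, and list_for_each_entry() names the entry type explicitly. The include path is an assumption for the sketch:

#include <stdio.h>
#include <sys/types.h>
#include "drm_linux_list.h"		/* path assumed for the sketch */

struct gem_obj {
	int id;
	struct list_head link;
};

int
main(void)
{
	struct list_head head;
	struct gem_obj a = { 1 }, b = { 2 };
	struct gem_obj *pos;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&a.link);
	INIT_LIST_HEAD(&b.link);

	/* The third argument is recorded in contain_ptr for list_entry(). */
	list_add_tail(&a.link, &head, (caddr_t)&a);
	list_add_tail(&b.link, &head, (caddr_t)&b);

	/*
	 * The entry type is passed explicitly; iteration stops when
	 * list_entry() reaches the head, whose contain_ptr is NULL.
	 */
	list_for_each_entry(pos, struct gem_obj, &head, link)
		(void) printf("gem %d\n", pos->id);

	return (0);
}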
diff --git a/usr/src/uts/common/drm/drm_mm.h b/usr/src/uts/common/drm/drm_mm.h
new file mode 100644
index 0000000..5578b41
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_mm.h
@@ -0,0 +1,242 @@
+/* BEGIN CSTYLED */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ */
+
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_MM_H_
+#define _DRM_MM_H_
+
+/*
+ * Generic range manager structs
+ */
+
+struct drm_mm_node {
+ struct list_head node_list;
+ struct list_head hole_stack;
+ unsigned hole_follows;
+ unsigned scanned_block;
+ unsigned scanned_prev_free;
+ unsigned scanned_next_free;
+ unsigned scanned_preceeds_hole;
+ unsigned allocated;
+ unsigned long color;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+};
+
+struct drm_mm {
+ /* List of free memory blocks, ordered most recently freed first. */
+ struct list_head hole_stack;
+ /* head_node.node_list is the list of all memory nodes, ordered
+ * according to the (increasing) start address of the memory node. */
+ struct drm_mm_node head_node;
+ struct list_head unused_nodes;
+ int num_unused;
+ spinlock_t unused_lock;
+ unsigned int scan_check_range : 1;
+ unsigned scan_alignment;
+ unsigned long scan_color;
+ unsigned long scan_size;
+ unsigned long scan_hit_start;
+ unsigned long scan_hit_end;
+ unsigned scanned_blocks;
+ unsigned long scan_start;
+ unsigned long scan_end;
+ struct drm_mm_node *prev_scanned_node;
+
+ void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
+ unsigned long *start, unsigned long *end);
+};
+
+static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+{
+ return node->allocated;
+}
+
+extern bool drm_mm_initialized(struct drm_mm *mm);
+extern unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node);
+extern unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node);
+extern unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node);
+extern unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node);
+#define drm_mm_for_each_node(entry, type, mm) list_for_each_entry(entry, type, \
+ &(mm)->head_node.node_list, \
+ node_list)
+#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
+ for (entry = (mm)->prev_scanned_node, \
+ next = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL; \
+ entry != NULL; entry = next, \
+ next = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL) \
+
+/* Note that we need to unroll list_for_each_entry in order to inline
+ * setting hole_start and hole_end on each iteration and keep the
+ * macro sane.
+ */
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+ for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+ (entry) && (&entry->hole_stack != &(mm)->hole_stack ? \
+ hole_start = drm_mm_hole_node_start(entry), \
+ hole_end = drm_mm_hole_node_end(entry), \
+ 1 : 0); \
+ entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
+/*
+ * Basic range manager support (drm_mm.c)
+ */
+extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size,
+ bool atomic);
+extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ int atomic);
+extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment);
+
+extern struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment);
+
+extern struct drm_mm_node *drm_mm_get_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+
+extern struct drm_mm_node *drm_mm_get_block_atomic_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+
+extern int drm_mm_insert_node(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment);
+extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
+extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color);
+extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end);
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ bool best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ bool best_match);
+extern void drm_mm_init(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern int drm_mm_pre_get(struct drm_mm *mm);
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+{
+ return block->mm;
+}
+
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color);
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end);
+int drm_mm_scan_add_block(struct drm_mm_node *node);
+int drm_mm_scan_remove_block(struct drm_mm_node *node);
+
+extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+#ifdef CONFIG_DEBUG_FS
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
+#endif
+
+#endif /* _DRM_MM_H_ */
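A hedged sketch of the basic flow these declarations imply, mirroring how Linux callers use the range manager; it assumes drm_mm.c from this patch set is linked in and that 4KB alignment suits the caller:

/* Reserve `size` bytes out of a freshly initialized 64MB managed range. */
static int
reserve_range_demo(struct drm_mm *mm, struct drm_mm_node *node,
    unsigned long size)
{
	int ret;

	drm_mm_init(mm, 0, 64UL << 20);		/* manage the range [0, 64MB) */

	ret = drm_mm_insert_node(mm, node, size, 4096);
	if (ret != 0) {
		drm_mm_takedown(mm);
		return (ret);			/* no hole large enough */
	}

	/* node->start and node->size now describe the reserved block. */

	drm_mm_remove_node(node);
	drm_mm_takedown(mm);
	return (0);
}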
diff --git a/usr/src/uts/common/drm/drm_mode.h b/usr/src/uts/common/drm/drm_mode.h
new file mode 100644
index 0000000..cdaff66
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_mode.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_MODE_H
+#define _DRM_MODE_H
+
+#define DRM_DISPLAY_INFO_LEN 32
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN 32
+#define DRM_PROP_NAME_LEN 32
+
+#define DRM_MODE_TYPE_BUILTIN (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED (1<<3)
+#define DRM_MODE_TYPE_DEFAULT (1<<4)
+#define DRM_MODE_TYPE_USERDEF (1<<5)
+#define DRM_MODE_TYPE_DRIVER (1<<6)
+
+/* Video mode flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_FLAG_PHSYNC (1<<0)
+#define DRM_MODE_FLAG_NHSYNC (1<<1)
+#define DRM_MODE_FLAG_PVSYNC (1<<2)
+#define DRM_MODE_FLAG_NVSYNC (1<<3)
+#define DRM_MODE_FLAG_INTERLACE (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN (1<<5)
+#define DRM_MODE_FLAG_CSYNC (1<<6)
+#define DRM_MODE_FLAG_PCSYNC (1<<7)
+#define DRM_MODE_FLAG_NCSYNC (1<<8)
+#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST (1<<10)
+#define DRM_MODE_FLAG_PIXMUX (1<<11)
+#define DRM_MODE_FLAG_DBLCLK (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
+#define DRM_MODE_FLAG_3D_NONE (0<<14)
+#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
+#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
+#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
+#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
+#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
+#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+
+
+/* DPMS flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_DPMS_ON 0
+#define DRM_MODE_DPMS_STANDBY 1
+#define DRM_MODE_DPMS_SUSPEND 2
+#define DRM_MODE_DPMS_OFF 3
+
+/* Scaling mode options */
+#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
+ software can still scale) */
+#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
+#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
+#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
+
+/* Dithering mode options */
+#define DRM_MODE_DITHERING_OFF 0
+#define DRM_MODE_DITHERING_ON 1
+#define DRM_MODE_DITHERING_AUTO 2
+
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
+struct drm_mode_modeinfo {
+ __u32 clock;
+ __u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
+ __u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+ __u32 vrefresh;
+
+ __u32 flags;
+ __u32 type;
+ char name[DRM_DISPLAY_MODE_LEN];
+};
+
+struct drm_mode_card_res {
+ __u64 fb_id_ptr;
+ __u64 crtc_id_ptr;
+ __u64 connector_id_ptr;
+ __u64 encoder_id_ptr;
+ __u32 count_fbs;
+ __u32 count_crtcs;
+ __u32 count_connectors;
+ __u32 count_encoders;
+ __u32 min_width, max_width;
+ __u32 min_height, max_height;
+};
+
+struct drm_mode_crtc {
+ __u64 set_connectors_ptr;
+ __u32 count_connectors;
+
+ __u32 crtc_id; /**< Id */
+ __u32 fb_id; /**< Id of framebuffer */
+
+ __u32 x, y; /**< Position on the framebuffer */
+
+ __u32 gamma_size;
+ __u32 mode_valid;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
+#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
+
+/* Planes blend with or override other bits on the CRTC */
+struct drm_mode_set_plane {
+ __u32 plane_id;
+ __u32 crtc_id;
+ __u32 fb_id; /* fb object contains surface format type */
+ __u32 flags;
+
+ /* Signed dest location allows it to be partially off screen */
+ __s32 crtc_x, crtc_y;
+ __u32 crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ __u32 src_x, src_y;
+ __u32 src_h, src_w;
+};
+
+struct drm_mode_get_plane {
+ __u32 plane_id;
+
+ __u32 crtc_id;
+ __u32 fb_id;
+
+ __u32 possible_crtcs;
+ __u32 gamma_size;
+
+ __u32 count_format_types;
+ __u64 format_type_ptr;
+};
+
+struct drm_mode_get_plane_res {
+ __u64 plane_id_ptr;
+ __u32 count_planes;
+};
+
+#define DRM_MODE_ENCODER_NONE 0
+#define DRM_MODE_ENCODER_DAC 1
+#define DRM_MODE_ENCODER_TMDS 2
+#define DRM_MODE_ENCODER_LVDS 3
+#define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI 6
+#define DRM_MODE_ENCODER_DPMST 7
+
+struct drm_mode_get_encoder {
+ __u32 encoder_id;
+ __u32 encoder_type;
+
+ __u32 crtc_id; /**< Id of crtc */
+
+ __u32 possible_crtcs;
+ __u32 possible_clones;
+};
+
+/* This is for connectors with multiple signal types. */
+/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
+#define DRM_MODE_SUBCONNECTOR_Automatic 0
+#define DRM_MODE_SUBCONNECTOR_Unknown 0
+#define DRM_MODE_SUBCONNECTOR_DVID 3
+#define DRM_MODE_SUBCONNECTOR_DVIA 4
+#define DRM_MODE_SUBCONNECTOR_Composite 5
+#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
+#define DRM_MODE_SUBCONNECTOR_Component 8
+#define DRM_MODE_SUBCONNECTOR_SCART 9
+
+#define DRM_MODE_CONNECTOR_Unknown 0
+#define DRM_MODE_CONNECTOR_VGA 1
+#define DRM_MODE_CONNECTOR_DVII 2
+#define DRM_MODE_CONNECTOR_DVID 3
+#define DRM_MODE_CONNECTOR_DVIA 4
+#define DRM_MODE_CONNECTOR_Composite 5
+#define DRM_MODE_CONNECTOR_SVIDEO 6
+#define DRM_MODE_CONNECTOR_LVDS 7
+#define DRM_MODE_CONNECTOR_Component 8
+#define DRM_MODE_CONNECTOR_9PinDIN 9
+#define DRM_MODE_CONNECTOR_DisplayPort 10
+#define DRM_MODE_CONNECTOR_HDMIA 11
+#define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
+#define DRM_MODE_CONNECTOR_VIRTUAL 15
+#define DRM_MODE_CONNECTOR_DSI 16
+
+struct drm_mode_get_connector {
+
+ __u64 encoders_ptr;
+ __u64 modes_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+
+ __u32 count_modes;
+ __u32 count_props;
+ __u32 count_encoders;
+
+ __u32 encoder_id; /**< Current Encoder */
+ __u32 connector_id; /**< Id */
+ __u32 connector_type;
+ __u32 connector_type_id;
+
+ __u32 connection;
+ __u32 mm_width, mm_height; /**< width x height in millimeters */
+ __u32 subpixel;
+};
+
+#define DRM_MODE_PROP_PENDING (1<<0)
+#define DRM_MODE_PROP_RANGE (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
+#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB (1<<4)
+#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
+
+/* non-extended types: legacy bitmask, one bit per type: */
+#define DRM_MODE_PROP_LEGACY_TYPE ( \
+ DRM_MODE_PROP_RANGE | \
+ DRM_MODE_PROP_ENUM | \
+ DRM_MODE_PROP_BLOB | \
+ DRM_MODE_PROP_BITMASK)
+
+/* extended-types: rather than continue to consume a bit per type,
+ * grab a chunk of the bits to use as integer type id.
+ */
+#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
+#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
+#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
+#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
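A hedged sketch of how a reader of the flags field can tell the two encodings apart (the helper name is illustrative; it assumes these defines and <stdint.h> are in scope):

/* Legacy types are one bit each; extended types share bits 6..15. */
static const char *
prop_type_name(uint32_t flags)
{
	if (flags & DRM_MODE_PROP_LEGACY_TYPE) {
		if (flags & DRM_MODE_PROP_RANGE)
			return ("range");
		if (flags & DRM_MODE_PROP_ENUM)
			return ("enum");
		if (flags & DRM_MODE_PROP_BLOB)
			return ("blob");
		return ("bitmask");
	}
	switch (flags & DRM_MODE_PROP_EXTENDED_TYPE) {
	case DRM_MODE_PROP_OBJECT:
		return ("object");
	case DRM_MODE_PROP_SIGNED_RANGE:
		return ("signed range");
	default:
		return ("unknown");
	}
}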
+
+struct drm_mode_property_enum {
+ __u64 value;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_mode_get_property {
+ __u64 values_ptr; /* values and blob lengths */
+ __u64 enum_blob_ptr; /* enum and blob id ptrs */
+
+ __u32 prop_id;
+ __u32 flags;
+ char name[DRM_PROP_NAME_LEN];
+
+ __u32 count_values;
+ __u32 count_enum_blobs;
+};
+
+struct drm_mode_connector_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 connector_id;
+};
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+
+struct drm_mode_obj_get_properties {
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u32 count_props;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_obj_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_get_blob {
+ __u32 blob_id;
+ __u32 length;
+ __u64 data;
+};
+
+struct drm_mode_fb_cmd {
+ __u32 fb_id;
+ __u32 width, height;
+ __u32 pitch;
+ __u32 bpp;
+ __u32 depth;
+ /* driver specific handle */
+ __u32 handle;
+};
+
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
+
+struct drm_mode_fb_cmd2 {
+ __u32 fb_id;
+ __u32 width, height;
+ __u32 pixel_format; /* fourcc code from drm_fourcc.h */
+ __u32 flags;
+
+ /*
+ * In case of planar formats, this ioctl allows up to 4
+ * buffer objects with offsets and pitches per plane.
+ * The pitch and offset order is dictated by the fourcc,
+ * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
+ *
+ * YUV 4:2:0 image with a plane of 8 bit Y samples
+ * followed by an interleaved U/V plane containing
+ * 8 bit 2x2 subsampled colour difference samples.
+ *
+ * So it would consist of Y as offset[0] and UV as
+ * offset[1]. Note that offset[0] will generally
+ * be 0.
+ *
+ * To accommodate tiled, compressed, etc. formats, a per-plane
+ * modifier can be specified. The default value of zero
+ * indicates a "native" format as specified by the fourcc.
+ * The modifier is a vendor-specific token; this allows, for
+ * example, a different tiling/swizzling pattern on each plane.
+ * See the discussion of DRM_FORMAT_MOD_xxx in drm_fourcc.h.
+ */
+ __u32 handles[4];
+ __u32 pitches[4]; /* pitch for each plane */
+ __u32 offsets[4]; /* offset of each plane */
+ __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
+};
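A hedged sketch of the planar case described above: registering an NV12 framebuffer whose Y and CbCr planes live in one GEM object. DRM_IOCTL_MODE_ADDFB2 and DRM_FORMAT_NV12 come from drm.h and drm_fourcc.h in this same patch set; fd, bo_handle and the 1920x1080 geometry are placeholders:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
/* assumes drm.h, drm_mode.h and drm_fourcc.h are included as well */

static int
add_nv12_fb(int fd, uint32_t bo_handle, uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 fb;

	(void) memset(&fb, 0, sizeof (fb));
	fb.width = 1920;
	fb.height = 1080;
	fb.pixel_format = DRM_FORMAT_NV12;	/* fourcc from drm_fourcc.h */

	fb.handles[0] = bo_handle;		/* plane 0: Y */
	fb.pitches[0] = 1920;			/* bytes per Y row */
	fb.offsets[0] = 0;

	fb.handles[1] = bo_handle;		/* plane 1: interleaved CbCr */
	fb.pitches[1] = 1920;			/* CbCr rows span the full width */
	fb.offsets[1] = 1920 * 1080;		/* CbCr follows the Y plane */

	if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &fb) != 0)
		return (-1);
	*fb_id = fb.fb_id;
	return (0);
}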
+
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * when a hardware or software draw touches a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware what
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; the annotations are a
+ * promise made by the caller that the change is either a copy
+ * of pixels or a fill of a single color in the region specified.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
+ * the number of updated regions is half of the num_clips given,
+ * where the clip rects are paired as src and dst. The width and
+ * height of each pair must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
+ * promises that the regions specified by the clip rects are filled
+ * completely with a single color as given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+ __u32 fb_id;
+ __u32 flags;
+ __u32 color;
+ __u32 num_clips;
+ __u64 clips_ptr;
+};
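A hedged sketch of flushing one changed region through the ioctl above; DRM_IOCTL_MODE_DIRTYFB and struct drm_clip_rect come from drm.h in this patch set, and the 64x64 clip is a placeholder:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
/* assumes drm.h and drm_mode.h are included as well */

static int
flush_dirty_region(int fd, uint32_t fb_id)
{
	struct drm_clip_rect clip = { 0, 0, 64, 64 };	/* x1, y1, x2, y2 */
	struct drm_mode_fb_dirty_cmd dirty;

	(void) memset(&dirty, 0, sizeof (dirty));
	dirty.fb_id = fb_id;
	dirty.flags = 0;			/* no copy/fill annotation */
	dirty.num_clips = 1;
	dirty.clips_ptr = (uintptr_t)&clip;	/* user pointer carried as __u64 */

	return (ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty));
}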
+
+struct drm_mode_mode_cmd {
+ __u32 connector_id;
+ struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO (1<<0)
+#define DRM_MODE_CURSOR_MOVE (1<<1)
+#define DRM_MODE_CURSOR_FLAGS (DRM_MODE_CURSOR_BO|DRM_MODE_CURSOR_MOVE)
+
+/*
+ * depending on the value in flags different members are used.
+ *
+ * CURSOR_BO uses
+ * crtc
+ * width
+ * height
+ * handle - if 0 turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ * crtc
+ * x
+ * y
+ */
+struct drm_mode_cursor {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+};
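A hedged sketch of the two uses just described, both driven through DRM_IOCTL_MODE_CURSOR from drm.h; the crtc id, buffer handle and 64x64 size are placeholders:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
/* assumes drm.h and drm_mode.h are included as well */

static int
show_and_move_cursor(int fd, uint32_t crtc_id, uint32_t bo_handle)
{
	struct drm_mode_cursor cur;

	/* CURSOR_BO: attach a 64x64 cursor buffer (a zero handle would hide it). */
	(void) memset(&cur, 0, sizeof (cur));
	cur.flags = DRM_MODE_CURSOR_BO;
	cur.crtc_id = crtc_id;
	cur.width = 64;
	cur.height = 64;
	cur.handle = bo_handle;
	if (ioctl(fd, DRM_IOCTL_MODE_CURSOR, &cur) != 0)
		return (-1);

	/* CURSOR_MOVE: only crtc_id, x and y are consulted. */
	(void) memset(&cur, 0, sizeof (cur));
	cur.flags = DRM_MODE_CURSOR_MOVE;
	cur.crtc_id = crtc_id;
	cur.x = 100;
	cur.y = 100;
	return (ioctl(fd, DRM_IOCTL_MODE_CURSOR, &cur));
}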
+
+struct drm_mode_cursor2 {
+ __u32 flags;
+ __u32 crtc_id;
+ __s32 x;
+ __s32 y;
+ __u32 width;
+ __u32 height;
+ /* driver specific handle */
+ __u32 handle;
+ __s32 hot_x;
+ __s32 hot_y;
+};
+
+struct drm_mode_crtc_lut {
+ __u32 crtc_id;
+ __u32 gamma_size;
+
+ /* pointers to arrays */
+ __u64 red;
+ __u64 green;
+ __u64 blue;
+};
+
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * The DRM_MODE_PAGE_FLIP_EVENT flag requests that drm send back a
+ * vblank event (see drm.h: struct drm_event_vblank) when the page
+ * flip is done. The user_data field passed in with this ioctl will be
+ * returned as the user_data field in the vblank event struct.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 reserved;
+ __u64 user_data;
+};
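A hedged sketch of queueing a flip with an event, as described above; DRM_IOCTL_MODE_PAGE_FLIP comes from drm.h in this patch set, and crtc_id, fb_id and user_data are supplied by the caller:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
/* assumes drm.h and drm_mode.h are included as well */

static int
queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id, void *user_data)
{
	struct drm_mode_crtc_page_flip flip;

	(void) memset(&flip, 0, sizeof (flip));
	flip.crtc_id = crtc_id;
	flip.fb_id = fb_id;
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT;	/* ask for a vblank event */
	flip.reserved = 0;			/* must be zero */
	flip.user_data = (uintptr_t)user_data;	/* echoed back in the event */

	/* EBUSY here means a flip is already pending on this crtc. */
	return (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip));
}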
+
+/* create a dumb scanout buffer */
+struct drm_mode_create_dumb {
+ __u32 height;
+ __u32 width;
+ __u32 bpp;
+ __u32 flags;
+ /* handle, pitch, size will be returned */
+ __u32 handle;
+ __u32 pitch;
+ __u64 size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_mode_destroy_dumb {
+ __u32 handle;
+};
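A hedged sketch of the create/map/mmap sequence these structs support; the ioctl request codes come from drm.h in this patch set, and 32bpp is only an example depth (error paths should destroy the handle; omitted for brevity):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
/* assumes drm.h and drm_mode.h are included as well */

static void *
map_dumb_fb(int fd, uint32_t w, uint32_t h, uint32_t *handle, uint64_t *size)
{
	struct drm_mode_create_dumb creq;
	struct drm_mode_map_dumb mreq;
	void *map;

	(void) memset(&creq, 0, sizeof (creq));
	creq.width = w;
	creq.height = h;
	creq.bpp = 32;				/* e.g. XRGB8888 scanout */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) != 0)
		return (NULL);

	(void) memset(&mreq, 0, sizeof (mreq));
	mreq.handle = creq.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) != 0)
		return (NULL);

	/* The fake offset returned above is fed straight to mmap(). */
	map = mmap(NULL, (size_t)creq.size, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, (off_t)mreq.offset);
	if (map == MAP_FAILED)
		return (NULL);

	*handle = creq.handle;
	*size = creq.size;
	return (map);
}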
+
+/* page-flip flags are valid, plus: */
+#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
+#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+struct drm_mode_atomic {
+ __u32 flags;
+ __u32 count_objs;
+ __u64 objs_ptr;
+ __u64 count_props_ptr;
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u64 reserved;
+ __u64 user_data;
+};
+
+/**
+ * Create a new 'blob' data property, copying length bytes from data pointer,
+ * and returning new blob ID.
+ */
+struct drm_mode_create_blob {
+ /** Pointer to data to copy. */
+ __u64 data;
+ /** Length of data to copy. */
+ __u32 length;
+ /** Return: new property ID. */
+ __u32 blob_id;
+};
+
+/**
+ * Destroy a user-created blob property.
+ */
+struct drm_mode_destroy_blob {
+ __u32 blob_id;
+};
+
+
+#endif
diff --git a/usr/src/uts/common/drm/drm_os_solaris.h b/usr/src/uts/common/drm/drm_os_solaris.h
new file mode 100644
index 0000000..7b5e63b
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_os_solaris.h
@@ -0,0 +1,119 @@
+/*
+ * file drm_os_solaris.h
+ *
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Solaris OS abstractions
+ */
+
+#ifndef _DRM_OS_SOLARIS_H
+#define _DRM_OS_SOLARIS_H
+
+/* Only Solaris builds should be here */
+#if defined(__sun)
+
+/* A couple of "hints" definitions not needed for Solaris drivers */
+#ifndef __user
+#define __user
+#endif
+#ifndef __force
+#define __force
+#endif
+#ifndef __must_check
+#define __must_check
+#endif
+
+typedef uint64_t drm_u64_t;
+
+/*
+ * Some defines that are best put in ioccom.h, but live here till then.
+ */
+
+/* Reverse ioctl command lookup. */
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+
+/* Mask values that would otherwise not be in drm.h or ioccom.h */
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+/* Solaris uses a bit for none, but calls it IOC_VOID */
+#define _IOC_NONE 1U
+#define _IOC_WRITE 2U
+#define _IOC_READ 4U
+
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+#if defined(__sun)
+/* Things that should be defined in drmP.h */
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#endif /* __sun */
+
+#define XFREE86_VERSION(major, minor, patch, snap) \
+ ((major << 16) | (minor << 8) | patch)
+
+#ifndef CONFIG_XFREE86_VERSION
+#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4, 1, 0, 0)
+#endif
+
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4, 1, 0, 0)
+#define DRM_PROC_DEVICES "/proc/devices"
+#define DRM_PROC_MISC "/proc/misc"
+#define DRM_PROC_DRM "/proc/drm"
+#define DRM_DEV_DRM "/dev/drm"
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+#endif /* CONFIG_XFREE86_VERSION < XFREE86_VERSION(4, 1, 0, 0) */
+
+#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4, 1, 0, 0)
+#ifdef __OpenBSD__
+#define DRM_MAJOR 81
+#endif
+#if defined(__linux__) || defined(__NetBSD__)
+#define DRM_MAJOR 226
+#endif
+#define DRM_MAX_MINOR 15
+#endif /* CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0) */
+
+
+#ifdef _KERNEL
+/* Defines that are only relevant for kernel modules and drivers */
+
+#ifdef __lint
+/* Don't lint these macros. */
+#define BUG_ON(a)
+#define WARN_ON(a)
+#else
+#define BUG_ON(a) ASSERT(!(a))
+#define WARN_ON(a) do { \
+ if (a) drm_debug_print(CE_WARN, __func__, __LINE__, #a);\
+ } while (__lintzero)
+#endif /* __lint */
+
+#define BUG() BUG_ON(1)
+
+#endif /* _KERNEL */
+#endif /* __sun */
+#endif /* _DRM_OS_SOLARIS_H */
diff --git a/usr/src/uts/common/drm/drm_pciids.h b/usr/src/uts/common/drm/drm_pciids.h
new file mode 100644
index 0000000..699cfaf
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_pciids.h
@@ -0,0 +1,250 @@
+/* BEGIN CSTYLED */
+
+/*
+ * This file is auto-generated from the drm_pciids.txt in the DRM CVS
+ * Please contact dri-devel@lists.sf.net to add new cards to this list
+ */
+
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef _DRM_PCIIDS_H_
+#define _DRM_PCIIDS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define radeon_PCI_IDS\
+ {0x1002, 0x4136, CHIP_RS100|RADEON_IS_IGP, \
+ "ATI Radeon RS100 IGP 320M"}, \
+ {0x1002, 0x4137, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS200 IGP"}, \
+ {0x1002, 0x4144, CHIP_R300, "ATI Radeon AD 9500 Pro"}, \
+ {0x1002, 0x4145, CHIP_R300, "ATI Radeon AE 9700 Pro"}, \
+ {0x1002, 0x4146, CHIP_R300, "ATI Radeon AF 9700 Pro"}, \
+ {0x1002, 0x4147, CHIP_R300, "ATI FireGL AG Z1/X1"}, \
+ {0x1002, 0x4150, CHIP_RV350, "ATI Radeon AP 9600"}, \
+ {0x1002, 0x4151, CHIP_RV350, "ATI Radeon AQ 9600"}, \
+ {0x1002, 0x4152, CHIP_RV350, "ATI Radeon AR 9600"}, \
+ {0x1002, 0x4153, CHIP_RV350, "ATI Radeon AS 9600 AS"}, \
+ {0x1002, 0x4154, CHIP_RV350, "ATI FireGL AT T2"}, \
+ {0x1002, 0x4156, CHIP_RV350, "ATI FireGL AV T2"}, \
+ {0x1002, 0x4237, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS250 IGP"}, \
+ {0x1002, 0x4242, CHIP_R200, "ATI Radeon BB R200 AIW 8500DV"}, \
+ {0x1002, 0x4243, CHIP_R200, "ATI Radeon BC R200"}, \
+ {0x1002, 0x4336, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY, \
+ "ATI Radeon RS100 Mobility U1"}, \
+ {0x1002, 0x4337, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, \
+ "ATI Radeon RS200 Mobility IGP 340M"}, \
+ {0x1002, 0x4437, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, \
+ "ATI Radeon RS250 Mobility IGP"}, \
+ {0x1002, 0x4966, CHIP_RV250, "ATI Radeon If R250 9000"}, \
+ {0x1002, 0x4967, CHIP_RV250, "ATI Radeon Ig R250 9000"}, \
+ {0x1002, 0x4A49, CHIP_R420, "ATI Radeon JI R420 X800PRO"}, \
+ {0x1002, 0x4A4B, CHIP_R420, "ATI Radeon JK R420 X800 XT"}, \
+ {0x1002, 0x4C57, CHIP_RV200|RADEON_IS_MOBILITY, \
+ "ATI Radeon LW RV200 Mobility 7500 M7"}, \
+ {0x1002, 0x4C58, CHIP_RV200|RADEON_IS_MOBILITY, \
+ "ATI Radeon LX RV200 Mobility FireGL 7800 M7"}, \
+ {0x1002, 0x4C59, CHIP_RV100|RADEON_IS_MOBILITY, \
+ "ATI Radeon LY RV100 Mobility M6"}, \
+ {0x1002, 0x4C5A, CHIP_RV100|RADEON_IS_MOBILITY, \
+ "ATI Radeon LZ RV100 Mobility M6"}, \
+ {0x1002, 0x4C64, CHIP_RV250|RADEON_IS_MOBILITY, \
+ "ATI Radeon Ld RV250 Mobility 9000 M9"}, \
+ {0x1002, 0x4C66, CHIP_RV250|RADEON_IS_MOBILITY, \
+ "ATI Radeon Lf R250 Mobility 9000 M9"}, \
+ {0x1002, 0x4C67, CHIP_RV250|RADEON_IS_MOBILITY, \
+ "ATI Radeon Lg R250 Mobility 9000 M9"}, \
+ {0x1002, 0x4E44, CHIP_R300, "ATI Radeon ND R300 9700 Pro"}, \
+ {0x1002, 0x4E45, CHIP_R300, "ATI Radeon NE R300 9500 Pro"}, \
+ {0x1002, 0x4E46, CHIP_RV350, "ATI Radeon NF RV350 9600"}, \
+ {0x1002, 0x4E47, CHIP_R300, "ATI Radeon NG R300 FireGL X1"}, \
+ {0x1002, 0x4E48, CHIP_R350, "ATI Radeon NH R350 9800 Pro"}, \
+ {0x1002, 0x4E49, CHIP_R350, "ATI Radeon NI R350 9800"}, \
+ {0x1002, 0x4E4A, CHIP_RV350, "ATI Radeon NJ RV350 9800 XT"}, \
+ {0x1002, 0x4E4B, CHIP_R350, "ATI Radeon NK R350 FireGL X2"}, \
+ {0x1002, 0x4E50, CHIP_RV350|RADEON_IS_MOBILITY, \
+ "ATI Radeon RV300 Mobility 9600 M10"}, \
+ {0x1002, 0x4E51, CHIP_RV350|RADEON_IS_MOBILITY, \
+ "ATI Radeon RV350 Mobility 9600 M10 NQ"}, \
+ {0x1002, 0x4E54, CHIP_RV350|RADEON_IS_MOBILITY, \
+ "ATI Radeon FireGL T2 128"}, \
+ {0x1002, 0x4E56, CHIP_RV350|RADEON_IS_MOBILITY, \
+ "ATI Radeon FireGL Mobility T2e"}, \
+ {0x1002, 0x5144, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QD R100"}, \
+ {0x1002, 0x5145, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QE R100"}, \
+ {0x1002, 0x5146, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QF R100"}, \
+ {0x1002, 0x5147, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QG R100"}, \
+ {0x1002, 0x5148, CHIP_R200, "ATI Radeon QH R200 8500"}, \
+ {0x1002, 0x5149, CHIP_R200, "ATI Radeon QI R200"}, \
+ {0x1002, 0x514A, CHIP_R200, "ATI Radeon QJ R200"}, \
+ {0x1002, 0x514B, CHIP_R200, "ATI Radeon QK R200"}, \
+ {0x1002, 0x514C, CHIP_R200, "ATI Radeon QL R200 8500 LE"}, \
+ {0x1002, 0x514D, CHIP_R200, "ATI Radeon QM R200 9100"}, \
+ {0x1002, 0x514E, CHIP_R200, "ATI Radeon QN R200 8500 LE"}, \
+ {0x1002, 0x514F, CHIP_R200, "ATI Radeon QO R200 8500 LE"}, \
+ {0x1002, 0x5157, CHIP_RV200, "ATI Radeon QW RV200 7500"}, \
+ {0x1002, 0x5158, CHIP_RV200, "ATI Radeon QX RV200 7500"}, \
+ {0x1002, 0x5159, CHIP_RV100, "ATI Radeon QY RV100 7000/VE"}, \
+ {0x1002, 0x515A, CHIP_RV100, "ATI Radeon QZ RV100 7000/VE"}, \
+ {0x1002, 0x515E, CHIP_RV100, "ATI ES1000 RN50"}, \
+ {0x1002, 0x5168, CHIP_R200, "ATI Radeon Qh R200"}, \
+ {0x1002, 0x5169, CHIP_R200, "ATI Radeon Qi R200"}, \
+ {0x1002, 0x516A, CHIP_R200, "ATI Radeon Qj R200"}, \
+ {0x1002, 0x516B, CHIP_R200, "ATI Radeon Qk R200"}, \
+ {0x1002, 0x516C, CHIP_R200, "ATI Radeon Ql R200"}, \
+ {0x1002, 0x5460, CHIP_RV350, "ATI Radeon X300"}, \
+ {0x1002, 0x554F, CHIP_R350, "ATI Radeon X800"}, \
+ {0x1002, 0x5653, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, \
+ "ATI Radeon Mobility X700 M26"}, \
+ {0x1002, 0x5834, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 IGP"}, \
+ {0x1002, 0x5835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY, \
+ "ATI Radeon RS300 Mobility IGP"}, \
+ {0x1002, 0x5836, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 IGP"}, \
+ {0x1002, 0x5837, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 IGP"}, \
+ {0x1002, 0x5960, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5961, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
+ {0x1002, 0x5962, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5963, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5964, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
+ {0x1002, 0x5968, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5969, CHIP_RV100, "ATI ES1000 RN50"}, \
+ {0x1002, 0x596A, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x596B, CHIP_RV280, "ATI Radeon RV280 9200"}, \
+ {0x1002, 0x5b60, CHIP_RV350, "ATI Radeon RV370 X300SE"}, \
+ {0x1002, 0x5b64, CHIP_RV380, "SUN XVR-300"}, \
+ {0x1002, 0x5c61, CHIP_RV280|RADEON_IS_MOBILITY, \
+ "ATI Radeon RV280 Mobility"}, \
+ {0x1002, 0x5c62, CHIP_RV280, "ATI Radeon RV280"}, \
+ {0x1002, 0x5c63, CHIP_RV280|RADEON_IS_MOBILITY, \
+ "ATI Radeon RV280 Mobility"}, \
+ {0x1002, 0x5c64, CHIP_RV280, "ATI Radeon RV280"}, \
+ {0x1002, 0x5d4d, CHIP_R350, "ATI Radeon R480"}, \
+ {0, 0, 0, NULL}
+
+#define r128_PCI_IDS\
+ {0x1002, 0x4c45, 0, "ATI Rage 128 Mobility LE (PCI)"}, \
+ {0x1002, 0x4c46, 0, "ATI Rage 128 Mobility LF (AGP)"}, \
+ {0x1002, 0x4d46, 0, "ATI Rage 128 Mobility MF (AGP)"}, \
+ {0x1002, 0x4d4c, 0, "ATI Rage 128 Mobility ML (AGP)"}, \
+ {0x1002, 0x5041, 0, "ATI Rage 128 Pro PA (PCI)"}, \
+ {0x1002, 0x5042, 0, "ATI Rage 128 Pro PB (AGP)"}, \
+ {0x1002, 0x5043, 0, "ATI Rage 128 Pro PC (AGP)"}, \
+ {0x1002, 0x5044, 0, "ATI Rage 128 Pro PD (PCI)"}, \
+ {0x1002, 0x5045, 0, "ATI Rage 128 Pro PE (AGP)"}, \
+ {0x1002, 0x5046, 0, "ATI Rage 128 Pro PF (AGP)"}, \
+ {0x1002, 0x5047, 0, "ATI Rage 128 Pro PG (PCI)"}, \
+ {0x1002, 0x5048, 0, "ATI Rage 128 Pro PH (AGP)"}, \
+ {0x1002, 0x5049, 0, "ATI Rage 128 Pro PI (AGP)"}, \
+ {0x1002, 0x504A, 0, "ATI Rage 128 Pro PJ (PCI)"}, \
+ {0x1002, 0x504B, 0, "ATI Rage 128 Pro PK (AGP)"}, \
+ {0x1002, 0x504C, 0, "ATI Rage 128 Pro PL (AGP)"}, \
+ {0x1002, 0x504D, 0, "ATI Rage 128 Pro PM (PCI)"}, \
+ {0x1002, 0x504E, 0, "ATI Rage 128 Pro PN (AGP)"}, \
+ {0x1002, 0x504F, 0, "ATI Rage 128 Pro PO (AGP)"}, \
+ {0x1002, 0x5050, 0, "ATI Rage 128 Pro PP (PCI)"}, \
+ {0x1002, 0x5051, 0, "ATI Rage 128 Pro PQ (AGP)"}, \
+ {0x1002, 0x5052, 0, "ATI Rage 128 Pro PR (PCI)"}, \
+ {0x1002, 0x5053, 0, "ATI Rage 128 Pro PS (PCI)"}, \
+ {0x1002, 0x5054, 0, "ATI Rage 128 Pro PT (AGP)"}, \
+ {0x1002, 0x5055, 0, "ATI Rage 128 Pro PU (AGP)"}, \
+ {0x1002, 0x5056, 0, "ATI Rage 128 Pro PV (PCI)"}, \
+ {0x1002, 0x5057, 0, "ATI Rage 128 Pro PW (AGP)"}, \
+ {0x1002, 0x5058, 0, "ATI Rage 128 Pro PX (AGP)"}, \
+ {0x1002, 0x5245, 0, "ATI Rage 128 RE (PCI)"}, \
+ {0x1002, 0x5246, 0, "ATI Rage 128 RF (AGP)"}, \
+ {0x1002, 0x5247, 0, "ATI Rage 128 RG (AGP)"}, \
+ {0x1002, 0x524b, 0, "ATI Rage 128 RK (PCI)"}, \
+ {0x1002, 0x524c, 0, "ATI Rage 128 RL (AGP)"}, \
+ {0x1002, 0x534d, 0, "ATI Rage 128 SM (AGP)"}, \
+ {0x1002, 0x5446, 0, "ATI Rage 128 Pro Ultra TF (AGP)"}, \
+ {0x1002, 0x544C, 0, "ATI Rage 128 Pro Ultra TL (AGP)"}, \
+ {0x1002, 0x5452, 0, "ATI Rage 128 Pro Ultra TR (AGP)"}, \
+ {0, 0, 0, NULL}
+
+#define mach64_PCI_IDS\
+ {0x1002, 0x4749, 0, "3D Rage Pro"}, \
+ {0x1002, 0x4750, 0, "3D Rage Pro 215GP"}, \
+ {0x1002, 0x4751, 0, "3D Rage Pro 215GQ"}, \
+ {0x1002, 0x4742, 0, "3D Rage Pro AGP 1X/2X"}, \
+ {0x1002, 0x4744, 0, "3D Rage Pro AGP 1X"}, \
+ {0x1002, 0x4c49, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c50, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c51, 0, "3D Rage LT Pro"}, \
+ {0x1002, 0x4c42, 0, "3D Rage LT Pro AGP-133"}, \
+ {0x1002, 0x4c44, 0, "3D Rage LT Pro AGP-66"}, \
+ {0x1002, 0x474c, 0, "Rage XC"}, \
+ {0x1002, 0x474f, 0, "Rage XL"}, \
+ {0x1002, 0x4752, 0, "Rage XL"}, \
+ {0x1002, 0x4753, 0, "Rage XC"}, \
+ {0x1002, 0x474d, 0, "Rage XL AGP 2X"}, \
+ {0x1002, 0x474e, 0, "Rage XC AGP"}, \
+ {0x1002, 0x4c52, 0, "Rage Mobility P/M"}, \
+ {0x1002, 0x4c53, 0, "Rage Mobility L"}, \
+ {0x1002, 0x4c4d, 0, "Rage Mobility P/M AGP 2X"}, \
+ {0x1002, 0x4c4e, 0, "Rage Mobility L AGP 2X"}, \
+ {0, 0, 0, NULL}
+
+#define sisdrv_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define tdfx_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define viadrv_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define i810_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define i830_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define gamma_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define savage_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define ffb_PCI_IDS \
+ {0, 0, 0, NULL}
+
+#define i915_PCI_IDS\
+ {0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
+ {0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
+ {0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
+ {0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
+ {0x8086, 0x2592, CHIP_I9XX|CHIP_I915, "Intel i915GM"}, \
+ {0x8086, 0x2772, CHIP_I9XX|CHIP_I915, "Intel i945G"}, \
+ {0x8086, 0x27A2, CHIP_I9XX|CHIP_I915, "Intel i945GM"}, \
+ {0x8086, 0x27AE, CHIP_I9XX|CHIP_I915, "Intel i945GME"}, \
+ {0x8086, 0x2972, CHIP_I9XX|CHIP_I965, "Intel i946GZ"}, \
+ {0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+ {0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
+ {0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
+ {0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
+ {0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
+ {0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
+ {0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
+ {0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
+ {0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Intel GM45"}, \
+ {0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel EL"}, \
+ {0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45"}, \
+ {0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45"}, \
+ {0x8086, 0x2E32, CHIP_I9XX|CHIP_I965, "Intel G41"}, \
+ {0x8086, 0x42, CHIP_I9XX|CHIP_I965, "Intel IGDNG_D"}, \
+ {0x8086, 0x46, CHIP_I9XX|CHIP_I965, "Intel IGDNG_M"}, \
+ {0x8086, 0x2E42, CHIP_I9XX|CHIP_I965, "Intel B43"}, \
+ {0, 0, 0, NULL}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DRM_PCIIDS_H_ */
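
Each *_PCI_IDS macro above expands to a brace-enclosed initializer list terminated by a {0, 0, 0, NULL} sentinel, so a driver consumes it by dropping the macro into a static table and walking entries until the NULL name. A minimal sketch of that pattern follows; the drm_pci_id_t element type here is a stand-in for illustration only (the real list-entry type lives in drmP.h), chosen to mirror the four fields of each entry.

#include <sys/types.h>
#include "drm_pciids.h"

/* Hypothetical element type; the real one is declared in drmP.h. */
typedef struct drm_pci_id {
	int vendor;
	int device;
	long driver_private;
	const char *name;
} drm_pci_id_t;

static drm_pci_id_t mach64_pciidlist[] = {
	mach64_PCI_IDS		/* expands to the table above, NULL-name terminated */
};

static const char *
mach64_find_name(int vendor, int device)
{
	int i;

	for (i = 0; mach64_pciidlist[i].name != NULL; i++) {
		if (mach64_pciidlist[i].vendor == vendor &&
		    mach64_pciidlist[i].device == device)
			return (mach64_pciidlist[i].name);
	}
	return (NULL);
}
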
diff --git a/usr/src/uts/common/drm/drm_rect.h b/usr/src/uts/common/drm/drm_rect.h
new file mode 100644
index 0000000..2fdedf1
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_rect.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_RECT_H
+#define DRM_RECT_H
+
+/**
+ * DOC: rect utils
+ *
+ * Utility functions to help manage rectangular areas for
+ * clipping, scaling, etc. calculations.
+ */
+
+/**
+ * struct drm_rect - two dimensional rectangle
+ * @x1: horizontal starting coordinate (inclusive)
+ * @x2: horizontal ending coordinate (exclusive)
+ * @y1: vertical starting coordinate (inclusive)
+ * @y2: vertical ending coordinate (exclusive)
+ */
+struct drm_rect {
+ int x1, y1, x2, y2;
+};
+
+/**
+ * drm_rect_adjust_size - adjust the size of the rectangle
+ * @r: rectangle to be adjusted
+ * @dw: horizontal adjustment
+ * @dh: vertical adjustment
+ *
+ * Change the size of rectangle @r by @dw in the horizontal direction,
+ * and by @dh in the vertical direction, while keeping the center
+ * of @r stationary.
+ *
+ * Positive @dw and @dh increase the size, negative values decrease it.
+ */
+static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
+{
+ r->x1 -= dw >> 1;
+ r->y1 -= dh >> 1;
+ r->x2 += (dw + 1) >> 1;
+ r->y2 += (dh + 1) >> 1;
+}
+
+/**
+ * drm_rect_translate - translate the rectangle
+ * @r: rectangle to be translated
+ * @dx: horizontal translation
+ * @dy: vertical translation
+ *
+ * Move rectangle @r by @dx in the horizontal direction,
+ * and by @dy in the vertical direction.
+ */
+static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
+{
+ r->x1 += dx;
+ r->y1 += dy;
+ r->x2 += dx;
+ r->y2 += dy;
+}
+
+/**
+ * drm_rect_downscale - downscale a rectangle
+ * @r: rectangle to be downscaled
+ * @horz: horizontal downscale factor
+ * @vert: vertical downscale factor
+ *
+ * Divide the coordinates of rectangle @r by @horz and @vert.
+ */
+static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert)
+{
+ r->x1 /= horz;
+ r->y1 /= vert;
+ r->x2 /= horz;
+ r->y2 /= vert;
+}
+
+/**
+ * drm_rect_width - determine the rectangle width
+ * @r: rectangle whose width is returned
+ *
+ * RETURNS:
+ * The width of the rectangle.
+ */
+static inline int drm_rect_width(const struct drm_rect *r)
+{
+ return r->x2 - r->x1;
+}
+
+/**
+ * drm_rect_height - determine the rectangle height
+ * @r: rectangle whose height is returned
+ *
+ * RETURNS:
+ * The height of the rectangle.
+ */
+static inline int drm_rect_height(const struct drm_rect *r)
+{
+ return r->y2 - r->y1;
+}
+
+/**
+ * drm_rect_visible - determine if the rectangle is visible
+ * @r: rectangle whose visibility is returned
+ *
+ * RETURNS:
+ * %true if the rectangle is visible, %false otherwise.
+ */
+static inline int drm_rect_visible(const struct drm_rect *r)
+{
+ return drm_rect_width(r) > 0 && drm_rect_height(r) > 0;
+}
+
+/**
+ * drm_rect_equals - determine if two rectangles are equal
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles are equal, %false otherwise.
+ */
+static inline int drm_rect_equals(const struct drm_rect *r1,
+ const struct drm_rect *r2)
+{
+ return r1->x1 == r2->x1 && r1->x2 == r2->x2 &&
+ r1->y1 == r2->y1 && r1->y2 == r2->y2;
+}
+
+int drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
+int drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+ const struct drm_rect *clip,
+ int hscale, int vscale);
+int drm_rect_calc_hscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_hscale, int max_hscale);
+int drm_rect_calc_vscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_vscale, int max_vscale);
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_hscale, int max_hscale);
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_vscale, int max_vscale);
+void drm_rect_debug_print(const struct drm_rect *r, int fixed_point);
+
+#endif /* DRM_RECT_H */
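
The inline helpers above compose naturally: a destination rectangle can be resized about its center, translated, clipped, and then tested for visibility. A short usage sketch follows, assuming (as in the Linux counterpart) that drm_rect_intersect() returns nonzero when the clipped rectangle is still visible.

#include "drm_rect.h"

/*
 * Adjust and move a destination rectangle with the inline helpers above,
 * then clip it against a bounding rectangle.
 */
static int
clip_plane_dst(struct drm_rect *dst, const struct drm_rect *clip)
{
	/* Grow dst by two pixels on every side, keeping its center fixed. */
	drm_rect_adjust_size(dst, 4, 4);

	/* Shift it 10 pixels right and 5 pixels down. */
	drm_rect_translate(dst, 10, 5);

	/* Assumed to return nonzero when something remains after clipping. */
	if (!drm_rect_intersect(dst, clip))
		return (0);

	return (drm_rect_visible(dst));
}
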
diff --git a/usr/src/uts/common/io/drm/drm_sarea.h b/usr/src/uts/common/drm/drm_sarea.h
index 302edcd..7325558 100644
--- a/usr/src/uts/common/io/drm/drm_sarea.h
+++ b/usr/src/uts/common/drm/drm_sarea.h
@@ -1,8 +1,8 @@
-/*
+/**
* \file drm_sarea.h
* \brief SAREA definitions
*
- * \author Michel D�zer <michel@daenzer.net>
+ * \author Michel Dänzer <michel@daenzer.net>
*/
/*
@@ -28,54 +28,55 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/* BEGIN CSTYLED */
-#ifndef _DRM_SAREA_H
-#define _DRM_SAREA_H
-#pragma ident "%Z%%M% %I% %E% SMI"
+#ifndef _DRM_SAREA_H_
+#define _DRM_SAREA_H_
#include "drm.h"
/* SAREA area needs to be at least a page */
#if defined(__alpha__)
-#define SAREA_MAX 0x2000
+#define SAREA_MAX 0x2000U
#elif defined(__ia64__)
-#define SAREA_MAX 0x10000 /* 64kB */
+#define SAREA_MAX 0x10000U /* 64kB */
#else
/* Intel 830M driver needs at least 8k SAREA */
-#define SAREA_MAX 0x2000UL
+#define SAREA_MAX 0x2000U
#endif
/** Maximum number of drawables in the SAREA */
-#define SAREA_MAX_DRAWABLES 256
+#define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
/** SAREA drawable */
-typedef struct drm_sarea_drawable {
+struct drm_sarea_drawable {
unsigned int stamp;
unsigned int flags;
-} drm_sarea_drawable_t;
+};
/** SAREA frame */
-typedef struct drm_sarea_frame {
+struct drm_sarea_frame {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
-} drm_sarea_frame_t;
+};
/** SAREA */
-typedef struct drm_sarea {
+struct drm_sarea {
/** first thing is always the DRM locking structure */
- drm_hw_lock_t lock;
+ struct drm_hw_lock lock;
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
- drm_hw_lock_t drawable_lock;
- drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
- drm_sarea_frame_t frame; /**< frame */
+ struct drm_hw_lock drawable_lock;
+ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ struct drm_sarea_frame frame; /**< frame */
drm_context_t dummy_context;
-} drm_sarea_t;
+};
+
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
-/* END CSTYLED */
-#endif /* _DRM_SAREA_H */
+#endif /* _DRM_SAREA_H_ */
diff --git a/usr/src/uts/common/drm/drm_sun_i2c.h b/usr/src/uts/common/drm/drm_sun_i2c.h
new file mode 100644
index 0000000..4482543
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_i2c.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_I2C_H__
+#define __DRM_I2C_H__
+
+#include <sys/ksynch.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/cmn_err.h>
+#include "drm.h"
+
+struct i2c_adapter;
+struct i2c_msg;
+
+struct i2c_algorithm {
+ int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
+ u32 (*functionality) (struct i2c_adapter *);
+};
+
+extern struct i2c_algorithm i2c_bit_algo;
+
+struct i2c_adapter {
+ struct i2c_algorithm *algo;
+ kmutex_t bus_lock;
+ clock_t timeout;
+ int retries;
+ char name[64];
+ void *data;
+ void (*setsda) (void *data, int state);
+ void (*setscl) (void *data, int state);
+ int (*getsda) (void *data);
+ int (*getscl) (void *data);
+ void *algo_data;
+ clock_t udelay;
+};
+
+#define I2C_M_RD 0x01
+#define I2C_M_NOSTART 0x02
+
+struct i2c_msg {
+ u16 addr;
+ u16 flags;
+ u16 len;
+ u8 *buf;
+};
+
+extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
+extern int i2c_bit_add_bus(struct i2c_adapter *adap);
+
+#endif /* __DRM_I2C_H__ */
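
Only the bit-banged transfer entry points are exported here: the owning driver fills in the adapter's setsda/setscl/getsda/getscl callbacks, installs i2c_bit_algo via i2c_bit_add_bus(), and then issues message arrays through i2c_transfer(). Below is a sketch of a DDC/EDID read, under the assumption that i2c_transfer() returns the number of messages completed, as its Linux namesake does.

#include <sys/errno.h>
#include "drm_sun_i2c.h"

#define	DDC_ADDR	0x50	/* standard EDID slave address */

static int
read_edid_bytes(struct i2c_adapter *adap, u8 *buf, u16 len)
{
	u8 offset = 0;
	struct i2c_msg msgs[2] = {
		{ DDC_ADDR, 0,        1,   &offset },	/* set EDID offset */
		{ DDC_ADDR, I2C_M_RD, len, buf },	/* read len bytes back */
	};

	return (i2c_transfer(adap, msgs, 2) == 2 ? 0 : EIO);
}
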
diff --git a/usr/src/uts/common/drm/drm_sun_idr.h b/usr/src/uts/common/drm/drm_sun_idr.h
new file mode 100644
index 0000000..bf88b8c
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_idr.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_IDR_H__
+#define __DRM_IDR_H__
+
+#include <sys/avl.h>
+
+struct idr_used_id {
+ struct avl_node link;
+ uint32_t id;
+ void *obj;
+};
+
+struct idr_free_id {
+ struct idr_free_id *next;
+ uint32_t id;
+};
+
+struct idr_free_id_range {
+ struct idr_free_id_range *next;
+ uint32_t start;
+ uint32_t end;
+ uint32_t min_unused_id;
+ struct idr_free_id *free_ids;
+};
+
+struct idr {
+ struct avl_tree used_ids;
+ struct idr_free_id_range *free_id_ranges;
+ kmutex_t lock;
+};
+
+extern void idr_init(struct idr *idrp);
+extern int idr_get_new_above(struct idr *idrp, void *obj, int start, int *newid);
+extern void* idr_find(struct idr *idrp, uint32_t id);
+extern int idr_remove(struct idr *idrp, uint32_t id);
+extern void* idr_replace(struct idr *idrp, void *obj, uint32_t id);
+extern int idr_pre_get(struct idr *idrp, int flag);
+extern int idr_for_each(struct idr *idrp, int (*fn)(int id, void *obj, void *data), void *data);
+extern void idr_remove_all(struct idr *idrp);
+extern void idr_destroy(struct idr* idrp);
+
+#define DRM_GEM_OBJIDR_HASHNODE 1024
+
+struct idr_list {
+ struct idr_list *next, *prev;
+ void *obj;
+ uint32_t handle;
+ caddr_t contain_ptr;
+};
+
+#define idr_list_for_each(entry, head) \
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
+ list_for_each(entry, &(head)->next[key])
+
+extern int idr_list_pre_get(struct idr_list *head, int flag);
+extern void idr_list_init(struct idr_list *head);
+extern int idr_list_get_new_above(struct idr_list *head,
+ void *obj,
+ int *handlep);
+extern void *idr_list_find(struct idr_list *head, uint32_t name);
+extern int idr_list_remove(struct idr_list *head, uint32_t name);
+extern void idr_list_free(struct idr_list *head);
+extern int idr_list_empty(struct idr_list *head);
+#endif /* __DRM_IDR_H__ */
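
The struct idr interface above follows the Linux two-step pattern: idr_pre_get() preallocates under the given allocation flag, then idr_get_new_above() returns the allocated id through its out parameter. A minimal sketch of allocating and releasing a GEM-style handle with only the functions declared here; it assumes, as in Linux, that idr_pre_get() returns zero when preallocation fails and that KM_SLEEP is an acceptable flag value.

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include "drm_sun_idr.h"

static int
alloc_handle(struct idr *idrp, void *obj, int *handlep)
{
	if (idr_pre_get(idrp, KM_SLEEP) == 0)
		return (ENOMEM);

	/* Allocate the lowest free id >= 1; 0 is reserved as "no handle". */
	return (idr_get_new_above(idrp, obj, 1, handlep));
}

static void
free_handle(struct idr *idrp, uint32_t handle)
{
	(void) idr_remove(idrp, handle);
}
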
diff --git a/usr/src/uts/common/drm/drm_sun_pci.h b/usr/src/uts/common/drm/drm_sun_pci.h
new file mode 100644
index 0000000..6485df2
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_pci.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_SUN_PCI_H__
+#define __DRM_SUN_PCI_H__
+
+#include <sys/sunddi.h>
+#include "drm_linux.h"
+#define PCI_CONFIG_REGION_NUMS 6
+
+struct pci_config_region {
+ unsigned long start;
+ unsigned long size;
+};
+
+struct pci_dev {
+ struct drm_device *dev;
+ ddi_acc_handle_t pci_cfg_acc_handle;
+
+ uint16_t vendor;
+ uint16_t device;
+ struct pci_config_region regions[PCI_CONFIG_REGION_NUMS];
+ int domain;
+ int bus;
+ int slot;
+ int func;
+ int irq;
+
+ ddi_iblock_cookie_t intr_block;
+
+ int msi_enabled;
+ ddi_intr_handle_t *msi_handle;
+ int msi_size;
+ int msi_actual;
+ uint_t msi_pri;
+ int msi_flag;
+};
+
+#define pci_resource_start(pdev, bar) ((pdev)->regions[(bar)].start)
+#define pci_resource_len(pdev, bar) ((pdev)->regions[(bar)].size)
+#define pci_resource_end(pdev, bar) \
+ ((pci_resource_len((pdev), (bar)) == 0 && \
+ pci_resource_start((pdev), (bar)) == 0) ? 0 : \
+ (pci_resource_start((pdev), (bar)) + \
+ pci_resource_len((pdev), (bar)) - 1))
+
+extern uint8_t* pci_map_rom(struct pci_dev *pdev, size_t *size);
+extern void pci_unmap_rom(struct pci_dev *pdev, uint8_t *base);
+extern void pci_read_config_byte(struct pci_dev *dev, int where, u8 *val);
+extern void pci_read_config_word(struct pci_dev *dev, int where, u16 *val);
+extern void pci_read_config_dword(struct pci_dev *dev, int where, u32 *val);
+extern void pci_write_config_byte(struct pci_dev *dev, int where, u8 val);
+extern void pci_write_config_word(struct pci_dev *dev, int where, u16 val);
+extern void pci_write_config_dword(struct pci_dev *dev, int where, u32 val);
+
+extern int pci_find_capability(struct pci_dev *pdev, int capid);
+extern struct pci_dev * pci_dev_create(struct drm_device *dev);
+extern void pci_dev_destroy(struct pci_dev *pdev);
+
+#endif /* __DRM_SUN_PCI_H__ */
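
The pci_* wrappers above reproduce the Linux config-space and BAR accessors on top of the DDI handle stored in struct pci_dev, so callers read IDs and aperture ranges the same way on both systems. A small sketch using only these declarations; offsets 0x00 and 0x02 are the standard PCI vendor/device ID locations.

#include <sys/cmn_err.h>
#include "drm_sun_pci.h"

static void
report_device(struct pci_dev *pdev)
{
	u16 vendor, device;
	unsigned long bar0_base, bar0_len;

	pci_read_config_word(pdev, 0x00, &vendor);	/* vendor ID */
	pci_read_config_word(pdev, 0x02, &device);	/* device ID */

	/* BAR 0: register aperture on i915-class devices. */
	bar0_base = pci_resource_start(pdev, 0);
	bar0_len = pci_resource_len(pdev, 0);

	cmn_err(CE_CONT, "pci %x,%x: BAR0 %lx+%lx\n",
	    vendor, device, bar0_base, bar0_len);
}
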
diff --git a/usr/src/uts/common/drm/drm_sun_timer.h b/usr/src/uts/common/drm/drm_sun_timer.h
new file mode 100644
index 0000000..c357805
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_timer.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_TIMER_H__
+#define __DRM_TIMER_H__
+
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/ksynch.h>
+#include "drm_linux_list.h"
+#include "drm_linux.h"
+
+#define del_timer_sync del_timer
+
+struct timer_list {
+ struct list_head *head;
+ void (*func)(void *);
+ void *arg;
+ clock_t expires;
+ unsigned long expired_time;
+ timeout_id_t timer_id;
+ kmutex_t lock;
+};
+
+extern void init_timer(struct timer_list *timer);
+extern void destroy_timer(struct timer_list *timer);
+extern void setup_timer(struct timer_list *timer, void (*func)(void *), void *arg);
+extern void mod_timer(struct timer_list *timer, clock_t expires);
+extern void del_timer(struct timer_list *timer);
+extern void test_set_timer(struct timer_list *timer, clock_t expires);
+
+#endif /* __DRM_TIMER_H__ */
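
The timer_list wrapper keeps the Linux call pattern: setup_timer() binds the callback and argument, mod_timer() (re)arms the timer, and del_timer()/del_timer_sync cancels it. A sketch of a simple retry timer, assuming mod_timer() takes a relative expiry expressed in clock ticks via drv_usectohz():

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include "drm_sun_timer.h"

struct my_softc {
	struct timer_list retry_timer;
	int retries;
};

static void
retry_cb(void *arg)
{
	struct my_softc *sc = arg;

	if (--sc->retries > 0)
		mod_timer(&sc->retry_timer, drv_usectohz(100000));	/* 100 ms */
}

static void
start_retries(struct my_softc *sc)
{
	sc->retries = 5;
	init_timer(&sc->retry_timer);
	setup_timer(&sc->retry_timer, retry_cb, sc);
	mod_timer(&sc->retry_timer, drv_usectohz(100000));
}
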
diff --git a/usr/src/uts/common/drm/drm_sun_workqueue.h b/usr/src/uts/common/drm/drm_sun_workqueue.h
new file mode 100644
index 0000000..a7e40eb
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sun_workqueue.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __DRM_SUN_WORKQUEUE_H__
+#define __DRM_SUN_WORKQUEUE_H__
+
+typedef void (* taskq_func_t)(void *);
+
+#define INIT_WORK(work, func) \
+ init_work((work), ((taskq_func_t)(func)))
+
+struct work_struct {
+ void (*func) (void *);
+};
+
+struct workqueue_struct {
+ ddi_taskq_t *taskq;
+ char *name;
+};
+
+extern int __queue_work(struct workqueue_struct *wq, struct work_struct *work);
+#define queue_work (void)__queue_work
+extern void init_work(struct work_struct *work, void (*func)(void *));
+extern struct workqueue_struct *create_workqueue(dev_info_t *dip, char *name);
+extern void destroy_workqueue(struct workqueue_struct *wq);
+extern void cancel_delayed_work(struct workqueue_struct *wq);
+extern void flush_workqueue(struct workqueue_struct *wq);
+
+#endif /* __DRM_SUN_WORKQUEUE_H__ */
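
The workqueue shim maps Linux work items onto a ddi_taskq_t: create_workqueue() needs the dip so it can name the taskq, INIT_WORK() records the handler, and queue_work() dispatches through __queue_work(). The sketch below assumes, based on the cast in INIT_WORK() above, that the handler receives the work_struct pointer itself as its argument; that layout assumption (work_struct first in the containing struct) is part of the illustration, not something this header guarantees.

#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include "drm_sun_workqueue.h"

struct hotplug_ctx {
	struct work_struct work;	/* assumed first, so the cast below works */
	int pipe;
};

static void
hotplug_func(void *arg)
{
	struct hotplug_ctx *ctx = (struct hotplug_ctx *)arg;

	cmn_err(CE_CONT, "hotplug on pipe %d\n", ctx->pipe);
}

static void
example(dev_info_t *dip, struct hotplug_ctx *ctx)
{
	struct workqueue_struct *wq = create_workqueue(dip, "hotplug_wq");

	INIT_WORK(&ctx->work, hotplug_func);
	queue_work(wq, &ctx->work);	/* expands to (void)__queue_work(...) */

	flush_workqueue(wq);
	destroy_workqueue(wq);
}
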
diff --git a/usr/src/uts/common/drm/drm_sunmod.h b/usr/src/uts/common/drm/drm_sunmod.h
new file mode 100644
index 0000000..ec4de5e
--- /dev/null
+++ b/usr/src/uts/common/drm/drm_sunmod.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Common misc module interfaces of DRM under Solaris
+ */
+
+/*
+ * I915 DRM Driver for Solaris
+ *
+ * This driver provides the hardware 3D acceleration support for Intel
+ * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
+ * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
+ * means the kernel device driver in DRI.
+ *
+ * The i915 driver is a device-dependent driver only; it depends on a misc
+ * module named drm for generic DRM operations.
+ *
+ * This driver also calls into gfx and agpmaster misc modules respectively for
+ * generic graphics operations and AGP master device support.
+ */
+
+#ifndef _SYS_DRM_SUNMOD_H_
+#define _SYS_DRM_SUNMOD_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/visual_io.h>
+#include <sys/fbio.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/open.h>
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/kd.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/sunldi.h>
+#include <sys/mkdev.h>
+#include <sys/gfx_private.h>
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpmaster_io.h>
+#include "drmP.h"
+#include <sys/modctl.h>
+
+
+/* graphics name for the common graphics minor node */
+#define GFX_NAME "gfx"
+
+/*
+ * softstate for DRM module
+ */
+typedef struct drm_instance_state {
+ kmutex_t mis_lock;
+ kmutex_t dis_ctxlock;
+ major_t mis_major;
+ dev_info_t *mis_dip;
+ drm_device_t *mis_devp;
+ ddi_acc_handle_t mis_cfg_hdl;
+ agp_master_softc_t *mis_agpm; /* agpmaster softstate ptr */
+ gfxp_vgatext_softc_ptr_t mis_gfxp; /* gfx softstate */
+} drm_inst_state_t;
+
+
+struct drm_inst_state_list {
+ drm_inst_state_t disl_state;
+ struct drm_inst_state_list *disl_next;
+
+};
+typedef struct drm_inst_state_list drm_inst_list_t;
+
+extern struct list_head drm_iomem_list;
+
+static int drm_sun_open(dev_t *, int, int, cred_t *);
+static int drm_sun_close(dev_t, int, int, cred_t *);
+static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+static int drm_sun_devmap(dev_t, devmap_cookie_t,
+ offset_t, size_t, size_t *, uint_t);
+static int drm_sun_chpoll(dev_t, short, int, short *, struct pollhead **);
+static int drm_sun_read(dev_t, struct uio *, cred_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_DRM_SUNMOD_H_ */
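
The per-instance drm_inst_state_t above is chained through drm_inst_state_list rather than kept in a ddi_soft_state head, so the cb_ops entry points typically walk that list to find the instance owning a given dev_info before delegating to the generic DRM layer. A sketch of that lookup; the list head variable is hypothetical here, since the real one is private to drm_sunmod.c.

#include "drm_sunmod.h"

/* Hypothetical list head; the real one lives in drm_sunmod.c. */
static drm_inst_list_t *drm_inst_list_head;

static drm_inst_state_t *
drm_sun_find_state(dev_info_t *dip)
{
	drm_inst_list_t *list;

	for (list = drm_inst_list_head; list != NULL; list = list->disl_next) {
		if (list->disl_state.mis_dip == dip)
			return (&list->disl_state);
	}
	return (NULL);
}
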
diff --git a/usr/src/uts/common/drm/i915_drm.h b/usr/src/uts/common/drm/i915_drm.h
new file mode 100644
index 0000000..459839d
--- /dev/null
+++ b/usr/src/uts/common/drm/i915_drm.h
@@ -0,0 +1,1033 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+#include <drm/drm.h>
+/* Need to make sure we have this included before going on */
+#include <drm/drm_os_solaris.h>
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+/* Each region is a minimum of 16k, and there are at most 255 of them.
+ */
+#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
+ * of chars for next/prev indices */
+#define I915_LOG_MIN_TEX_REGION_SIZE 14
+
+typedef struct _drm_i915_init {
+ enum {
+ I915_INIT_DMA = 0x01,
+ I915_CLEANUP_DMA = 0x02,
+ I915_RESUME_DMA = 0x03
+ } func;
+ unsigned int mmio_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+ unsigned int chipset;
+} drm_i915_init_t;
+
+typedef struct _drm_i915_sarea {
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
+ int last_upload; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int ctxOwner; /* last context to upload state */
+ int texAge;
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+ int perf_boxes; /* performance boxes to be displayed */
+ int width, height; /* screen size in pixels */
+ int pad0;
+
+ drm_handle_t front_handle;
+ int front_offset;
+ int front_size;
+
+ drm_handle_t back_handle;
+ int back_offset;
+ int back_size;
+
+ drm_handle_t depth_handle;
+ int depth_offset;
+ int depth_size;
+
+ drm_handle_t tex_handle;
+ int tex_offset;
+ int tex_size;
+ int log_tex_granularity;
+ int pitch;
+ int rotation; /* 0, 90, 180 or 270 */
+ int rotated_offset;
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
+
+ unsigned int front_tiled;
+ unsigned int back_tiled;
+ unsigned int depth_tiled;
+ unsigned int rotated_tiled;
+ unsigned int rotated2_tiled;
+
+ int pipeA_x;
+ int pipeA_y;
+ int pipeA_w;
+ int pipeA_h;
+ int pipeB_x;
+ int pipeB_y;
+ int pipeB_w;
+ int pipeB_h;
+ int pad1;
+
+ /* fill out some space for old userspace triple buffer */
+ drm_handle_t unused_handle;
+ __u32 unused1, unused2, unused3;
+
+ /* buffer object handles for static buffers. May change
+ * over the lifetime of the client.
+ */
+ __u32 front_bo_handle;
+ __u32 back_bo_handle;
+ __u32 unused_bo_handle;
+ __u32 depth_bo_handle;
+
+} drm_i915_sarea_t;
+
+/* due to userspace building against these headers we need some compat here */
+#define planeA_x pipeA_x
+#define planeA_y pipeA_y
+#define planeA_w pipeA_w
+#define planeA_h pipeA_h
+#define planeB_x pipeB_x
+#define planeB_y pipeB_y
+#define planeB_w pipeB_w
+#define planeB_h pipeB_h
+
+/* Flags for perf_boxes
+ */
+#define I915_BOX_RING_EMPTY 0x1
+#define I915_BOX_FLIP 0x2
+#define I915_BOX_WAIT 0x4
+#define I915_BOX_TEXTURE_LOAD 0x8
+#define I915_BOX_LOST_CONTEXT 0x10
+
+/* I915 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_I915_INIT 0x00
+#define DRM_I915_FLUSH 0x01
+#define DRM_I915_FLIP 0x02
+#define DRM_I915_BATCHBUFFER 0x03
+#define DRM_I915_IRQ_EMIT 0x04
+#define DRM_I915_IRQ_WAIT 0x05
+#define DRM_I915_GETPARAM 0x06
+#define DRM_I915_SETPARAM 0x07
+#define DRM_I915_ALLOC 0x08
+#define DRM_I915_FREE 0x09
+#define DRM_I915_INIT_HEAP 0x0a
+#define DRM_I915_CMDBUFFER 0x0b
+#define DRM_I915_DESTROY_HEAP 0x0c
+#define DRM_I915_SET_VBLANK_PIPE 0x0d
+#define DRM_I915_GET_VBLANK_PIPE 0x0e
+#define DRM_I915_VBLANK_SWAP 0x0f
+#define DRM_I915_HWS_ADDR 0x11
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT 0x24
+#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
+#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
+#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_WAIT 0x2c
+#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
+#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
+#define DRM_I915_GEM_SET_CACHING 0x2f
+#define DRM_I915_GEM_GET_CACHING 0x30
+#define DRM_I915_REG_READ 0x31
+
+#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
+#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, drm_i915_hws_addr_t)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
+
+#ifdef _MULTI_DATAMODEL
+#define I915_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = _copyin32, .copyout32 = _copyout32}
+#else
+#define I915_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL}
+#endif
+
+/* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+ */
+typedef struct drm_i915_batchbuffer {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer_t;
+
+typedef struct drm_i915_batchbuffer32 {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ caddr32_t cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer32_t;
+
+/* As above, but pass a pointer to userspace buffer which can be
+ * validated by the kernel prior to sending to hardware.
+ */
+typedef struct _drm_i915_cmdbuffer {
+ char __user *buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer_t;
+
+typedef struct drm_i915_cmdbuffer32 {
+ caddr32_t buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+ int num_cliprects; /* multipass with multiple cliprects? */
+ caddr32_t cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer32_t;
+
+/* Userspace can request & wait on irq's:
+ */
+typedef struct drm_i915_irq_emit {
+ int __user *irq_seq;
+} drm_i915_irq_emit_t;
+
+typedef struct drm_i915_irq_emit32 {
+ caddr32_t irq_seq;
+} drm_i915_irq_emit32_t;
+
+typedef struct drm_i915_irq_wait {
+ int irq_seq;
+} drm_i915_irq_wait_t;
+
+/* Ioctl to query kernel params:
+ */
+#define I915_PARAM_IRQ_ACTIVE 1
+#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
+#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
+#define I915_PARAM_HAS_EXECBUF2 9
+#define I915_PARAM_HAS_BSD 10
+#define I915_PARAM_HAS_BLT 11
+#define I915_PARAM_HAS_RELAXED_FENCING 12
+#define I915_PARAM_HAS_COHERENT_RINGS 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS 14
+#define I915_PARAM_HAS_RELAXED_DELTA 15
+#define I915_PARAM_HAS_GEN7_SOL_RESET 16
+#define I915_PARAM_HAS_LLC 17
+#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
+#define I915_PARAM_HAS_SEMAPHORES 20
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#define I915_PARAM_HAS_VEBOX 22
+#define I915_PARAM_HAS_SECURE_BATCHES 23
+#define I915_PARAM_HAS_PINNED_BATCHES 24
+#define I915_PARAM_HAS_EXEC_NO_RELOC 25
+#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
+
+typedef struct drm_i915_getparam {
+ int param;
+ int __user *value;
+} drm_i915_getparam_t;
+
+typedef struct drm_i915_getparam32 {
+ int param;
+ caddr32_t value;
+} drm_i915_getparam32_t;
+
+/* Ioctl to set kernel params:
+ */
+#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
+#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
+#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+#define I915_SETPARAM_NUM_USED_FENCES 4
+
+typedef struct drm_i915_setparam {
+ int param;
+ int value;
+} drm_i915_setparam_t;
+
+/* A memory manager for regions of shared memory:
+ */
+#define I915_MEM_REGION_AGP 1
+
+typedef struct drm_i915_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int __user *region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc_t;
+
+typedef struct drm_i915_mem_alloc32 {
+ int region;
+ int alignment;
+ int size;
+ caddr32_t region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc32_t;
+
+typedef struct drm_i915_mem_free {
+ int region;
+ int region_offset;
+} drm_i915_mem_free_t;
+
+typedef struct drm_i915_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_i915_mem_init_heap_t;
+
+/* Allow memory manager to be torn down and re-initialized (eg on
+ * rotate):
+ */
+typedef struct drm_i915_mem_destroy_heap {
+ int region;
+} drm_i915_mem_destroy_heap_t;
+
+/* Allow X server to configure which pipes to monitor for vblank signals
+ */
+#define DRM_I915_VBLANK_PIPE_A 1
+#define DRM_I915_VBLANK_PIPE_B 2
+
+typedef struct drm_i915_vblank_pipe {
+ int pipe;
+} drm_i915_vblank_pipe_t;
+
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+ drm_drawable_t drawable;
+ enum drm_vblank_seq_type seqtype;
+ unsigned int sequence;
+} drm_i915_vblank_swap_t;
+
+typedef struct drm_i915_hws_addr {
+ __u64 addr;
+} drm_i915_hws_addr_t;
+
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ __u64 gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ __u64 gtt_end;
+};
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ __u64 size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to read from */
+ __u64 offset;
+ /** Length of data to read */
+ __u64 size;
+ /**
+ * Pointer to write the data into.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset into the object to write to */
+ __u64 offset;
+ /** Length of data to write */
+ __u64 size;
+ /**
+ * Pointer to read the data from.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /** Offset in the object to map. */
+ __u64 offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ __u64 size;
+ /**
+ * Returned pointer the data was mapped at.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 addr_ptr;
+};
+
+struct drm_i915_gem_mmap_gtt {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ __u32 handle;
+
+ /** New read domains */
+ __u32 read_domains;
+
+ /** New write domain */
+ __u32 write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+ /** Handle for the object */
+ __u32 handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this be an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ __u32 target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ __u32 delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ __u64 offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ __u64 presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ __u32 read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ __u32 write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** @} */
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ __u64 offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+ * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+};
+
+struct drm_i915_gem_exec_object2 {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ __u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT (1<<1)
+#define EXEC_OBJECT_WRITE (1<<2)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
+ __u64 flags;
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
+struct drm_i915_gem_execbuffer2 {
+ /**
+ * List of gem_exec_object2 structs
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_DEFAULT (0<<0)
+#define I915_EXEC_RENDER (1<<0)
+#define I915_EXEC_BSD (2<<0)
+#define I915_EXEC_BLT (3<<0)
+#define I915_EXEC_VEBOX (4<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
+ __u64 flags; /* currently unused */
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
+/** Resets the SO write offset registers for transform feedback on gen7. */
+#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE (1<<9)
+
+/** Inform the kernel that the batch is and will always be pinned. This
+ * negates the requirement for a workaround to be performed to avoid
+ * an incoherent CS (such as can be found on 830/845). If this flag is
+ * not passed, the kernel will endeavour to make sure the batch is
+ * coherent with the CS before execution. If this flag is passed,
+ * userspace assumes the responsibility for ensuring the same.
+ */
+#define I915_EXEC_IS_PINNED (1<<10)
+
+/** Provide a hint to the kernel that the command stream and auxiliary
+ * state buffers already hold the correct presumed addresses and so the
+ * relocation process may be skipped if no buffers need to be moved in
+ * preparation for the execbuffer.
+ */
+#define I915_EXEC_NO_RELOC (1<<11)
+
+/** Use the reloc.handle as an index into the exec object array rather
+ * than as the per-file handle.
+ */
+#define I915_EXEC_HANDLE_LUT (1<<12)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
+
+#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
+#define i915_execbuffer2_set_context_id(eb2, context) \
+ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+#define i915_execbuffer2_get_context_id(eb2) \
+ ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ __u32 handle;
+ __u32 pad;
+
+ /** alignment required within the aperture */
+ __u64 alignment;
+
+ /** Returned GTT offset of the buffer. */
+ __u64 offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ __u32 handle;
+ __u32 pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ __u32 handle;
+
+ /** Return busy status (1 if busy, 0 if idle).
+ * The high word is used to indicate on which rings the object
+ * currently resides:
+ * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+ */
+ __u32 busy;
+};
+
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+
+struct drm_i915_gem_caching {
+ /**
+ * Handle of the buffer to set/get the caching level of. */
+ __u32 handle;
+
+ /**
+ * Caching level to apply or return value
+ *
+ * bits0-15 are for generic caching control (i.e. the above defined
+ * values). bits16-31 are reserved for platform-specific variations
+ * (e.g. l3$ caching on gen7). */
+ __u32 caching;
+};
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+/* Seen by userland. */
+#define I915_BIT_6_SWIZZLE_9_17 6
+#define I915_BIT_6_SWIZZLE_9_10_17 7
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ __u32 handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ __u32 tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ __u32 stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ __u32 handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ __u32 tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ __u32 swizzle_mode;
+};
+
+struct drm_i915_gem_get_aperture {
+ /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+ __u64 aper_size;
+
+ /**
+ * Available space in the aperture used by i915_gem_execbuffer, in
+ * bytes
+ */
+ __u64 aper_available_size;
+};
+
+struct drm_i915_get_pipe_from_crtc_id {
+ /** ID of CRTC being requested **/
+ __u32 crtc_id;
+
+ /** pipe of requested CRTC **/
+ __u32 pipe;
+};
+
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+#define __I915_MADV_PURGED 2 /* internal state */
+
+struct drm_i915_gem_madvise {
+ /** Handle of the buffer to change the backing store advice */
+ __u32 handle;
+
+ /* Advice: either the buffer will be needed again in the near future,
+ * or won't be and could be discarded under memory pressure.
+ */
+ __u32 madv;
+
+ /** Whether the backing store still exists. */
+ __u32 retained;
+};
+
+/* flags */
+#define I915_OVERLAY_TYPE_MASK 0xff
+#define I915_OVERLAY_YUV_PLANAR 0x01
+#define I915_OVERLAY_YUV_PACKED 0x02
+#define I915_OVERLAY_RGB 0x03
+
+#define I915_OVERLAY_DEPTH_MASK 0xff00
+#define I915_OVERLAY_RGB24 0x1000
+#define I915_OVERLAY_RGB16 0x2000
+#define I915_OVERLAY_RGB15 0x3000
+#define I915_OVERLAY_YUV422 0x0100
+#define I915_OVERLAY_YUV411 0x0200
+#define I915_OVERLAY_YUV420 0x0300
+#define I915_OVERLAY_YUV410 0x0400
+
+#define I915_OVERLAY_SWAP_MASK 0xff0000
+#define I915_OVERLAY_NO_SWAP 0x000000
+#define I915_OVERLAY_UV_SWAP 0x010000
+#define I915_OVERLAY_Y_SWAP 0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
+
+#define I915_OVERLAY_FLAGS_MASK 0xff000000
+#define I915_OVERLAY_ENABLE 0x01000000
+
+struct drm_intel_overlay_put_image {
+ /* various flags and src format description */
+ __u32 flags;
+ /* source picture description */
+ __u32 bo_handle;
+ /* stride values and offsets are in bytes, buffer relative */
+ __u16 stride_Y; /* stride for packed formats */
+ __u16 stride_UV;
+ __u32 offset_Y; /* offset for packed formats */
+ __u32 offset_U;
+ __u32 offset_V;
+ /* in pixels */
+ __u16 src_width;
+ __u16 src_height;
+ /* to compensate for the scaling factors on partially covered surfaces */
+ __u16 src_scan_width;
+ __u16 src_scan_height;
+ /* output crtc description */
+ __u32 crtc_id;
+ __u16 dst_x;
+ __u16 dst_y;
+ __u16 dst_width;
+ __u16 dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+struct drm_intel_overlay_attrs {
+ __u32 flags;
+ __u32 color_key;
+ __s32 brightness;
+ __u32 contrast;
+ __u32 saturation;
+ __u32 gamma0;
+ __u32 gamma1;
+ __u32 gamma2;
+ __u32 gamma3;
+ __u32 gamma4;
+ __u32 gamma5;
+};
+
+/*
+ * Intel sprite handling
+ *
+ * Color keying works with a min/mask/max tuple. Both source and destination
+ * color keying is allowed.
+ *
+ * Source keying:
+ * Sprite pixels within the min & max values, masked against the color channels
+ * specified in the mask field, will be transparent. All other pixels will
+ * be displayed on top of the primary plane. For RGB surfaces, only the min
+ * and mask fields will be used; ranged compares are not allowed.
+ *
+ * Destination keying:
+ * Primary plane pixels that match the min value, masked against the color
+ * channels specified in the mask field, will be replaced by corresponding
+ * pixels from the sprite plane.
+ *
+ * Note that source & destination keying are exclusive; only one can be
+ * active on a given plane.
+ */
+
+#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_DESTINATION (1<<1)
+#define I915_SET_COLORKEY_SOURCE (1<<2)
+struct drm_intel_sprite_colorkey {
+ __u32 plane_id;
+ __u32 min_value;
+ __u32 channel_mask;
+ __u32 max_value;
+ __u32 flags;
+};
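Mapping the keying rules above onto the ioctl payload is direct. The sketch below enables source keying on a sprite plane, assuming a DRM_IOCTL_I915_SET_SPRITE_COLORKEY request macro defined earlier in this header and a plane_id taken from the KMS plane list; remember that source and destination keying are mutually exclusive and that RGB surfaces only honour min_value and channel_mask.

/* Make sprite pixels in [min, max] (masked channels only) transparent. */
static int enable_source_key(int drm_fd, uint32_t plane_id,
                             uint32_t min, uint32_t max, uint32_t mask)
{
	struct drm_intel_sprite_colorkey key;

	memset(&key, 0, sizeof(key));
	key.plane_id = plane_id;
	key.min_value = min;
	key.max_value = max;        /* ignored for RGB surfaces */
	key.channel_mask = mask;    /* channels that take part in the compare */
	key.flags = I915_SET_COLORKEY_SOURCE;
	if (ioctl(drm_fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &key) != 0)
		return -errno;
	return 0;
}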
+
+struct drm_i915_gem_wait {
+ /** Handle of BO we shall wait on */
+ __u32 bo_handle;
+ __u32 flags;
+ /** Number of nanoseconds to wait; updated on return with the time remaining. */
+ __s64 timeout_ns;
+};
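Because timeout_ns is written back with the remaining budget, the structure above supports bounded waits. A minimal sketch, assuming DRM_IOCTL_I915_GEM_WAIT from earlier in this header; it returns 0 when the GPU is done with the object, or a negative errno (including the kernel's timeout error) if the budget ran out first:

static int wait_bo(int drm_fd, uint32_t bo_handle, int64_t budget_ns)
{
	struct drm_i915_gem_wait w;

	memset(&w, 0, sizeof(w));
	w.bo_handle = bo_handle;
	w.timeout_ns = budget_ns;   /* rewritten with the time left */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &w) != 0)
		return -errno;
	return 0;
}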
+
+struct drm_i915_gem_context_create {
+ /* output: id of the new context */
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct drm_i915_gem_context_destroy {
+ __u32 ctx_id;
+ __u32 pad;
+};
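A minimal lifecycle sketch for the two context structures above, assuming the DRM_IOCTL_I915_GEM_CONTEXT_CREATE and DRM_IOCTL_I915_GEM_CONTEXT_DESTROY requests defined earlier in this header:

static int context_create(int drm_fd, uint32_t *ctx_id)
{
	struct drm_i915_gem_context_create create;

	memset(&create, 0, sizeof(create));
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) != 0)
		return -errno;
	*ctx_id = create.ctx_id;   /* filled in by the kernel */
	return 0;
}

static void context_destroy(int drm_fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_destroy destroy;

	memset(&destroy, 0, sizeof(destroy));
	destroy.ctx_id = ctx_id;
	(void) ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}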
+
+struct drm_i915_reg_read {
+ __u64 offset;
+ __u64 val; /* Return value */
+};
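And a last sketch for the register-read structure above, assuming a DRM_IOCTL_I915_REG_READ request macro defined earlier in this header; the kernel may restrict which offsets can be read:

static int read_reg(int drm_fd, uint64_t offset, uint64_t *value)
{
	struct drm_i915_reg_read rr;

	memset(&rr, 0, sizeof(rr));
	rr.offset = offset;
	if (ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &rr) != 0)
		return -errno;
	*value = rr.val;
	return 0;
}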
+#endif /* _I915_DRM_H_ */
diff --git a/usr/src/uts/common/io/drm/LICENSE_DRM b/usr/src/uts/common/io/drm/LICENSE_DRM
new file mode 100644
index 0000000..2e6277e
--- /dev/null
+++ b/usr/src/uts/common/io/drm/LICENSE_DRM
@@ -0,0 +1,1349 @@
+
+File: ati_pcigart.c
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_agpsupport.c
+
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_atomic.h
+
+/*
+ * Copyright 2004 Eric Anholt
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_auth.c
+
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_bufs.c
+
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_cache.c
+
+/*
+ * Copyright(c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files(the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice(including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_context.c
+
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_core.h
+
+/*
+ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_crtc_helper.c
+
+/*
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_crtc_helper.h
+
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_crtc.c
+
+/*
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_crtc.h
+
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_dma.c
+
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_dp_helper.c
+
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_dp_helper.h
+
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_dp_i2c_helper.c
+
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_drv.c
+
+/*
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_edid.c
+
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_edid.h
+
+/*
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_fb_helper.c
+
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_fb_helper.h
+
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008, 2012, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_fops.c
+
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_fourcc.h
+
+/*
+ * Copyright 2011, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_gem.c
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_ioctl.c
+
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_irq.c
+
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_linux_list.h
+
+/*
+ * -
+ * Copyright 2003 Eric Anholt
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <anholt@FreeBSD.org>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_lock.c
+
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_memory.c
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2012, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_mm.c
+
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+-------------------------------------------------------------------------
+
+File: drm_mm.h
+
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+-------------------------------------------------------------------------
+
+File: drm_mode.h
+
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_modes.c
+
+/*
+ *
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_pci.c
+
+/*-
+ * Copyright 2003 Eric Anholt.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_rect.c
+
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_rect.h
+
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_sarea.h
+
+/*
+ * \file drm_sarea.h
+ * \brief SAREA definitions
+ *
+ * \author Michel Dänzer <michel@daenzer.net>
+ */
+
+/*
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_scatter.c
+
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm_stub.c
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drm.h
+
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, Intel Corporation.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+-------------------------------------------------------------------------
+
+File: drmP.h
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
+ */
+
+-------------------------------------------------------------------------
diff --git a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip b/usr/src/uts/common/io/drm/LICENSE_DRM.descrip
index 70fef96..70fef96 100644
--- a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE.descrip
+++ b/usr/src/uts/common/io/drm/LICENSE_DRM.descrip
diff --git a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE b/usr/src/uts/common/io/drm/THIRDPARTYLICENSE
deleted file mode 100644
index 30228d9..0000000
--- a/usr/src/uts/common/io/drm/THIRDPARTYLICENSE
+++ /dev/null
@@ -1,314 +0,0 @@
- Solaris Direct Rendering Manager kernel drivers and modules
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/common/io/drm/drm.h
-usr/src/uts/common/io/drm/drmP.h
-usr/src/uts/common/io/drm/drm_agpsupport.c
-usr/src/uts/common/io/drm/drm_auth.c
-usr/src/uts/common/io/drm/drm_fops.c
-usr/src/uts/common/io/drm/drm_ioctl.c
-usr/src/uts/common/io/drm/drm_lock.c
-usr/src/uts/common/io/drm/drm_memory.c
-
-are covered by the following copyrights/license text:
-
-/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-File:
-
-usr/src/uts/common/io/drm/drm_drawable.c
-
-is covered by the following copyrights/license text:
-
-/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/common/io/drm/drm_irq.c
-usr/src/uts/common/io/drm/drm_pci.c
-
-are covered by the following copyrights/license text:
-
-/*
- * Copyright 2003 Eric Anholt
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
- * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-File:
-
-usr/src/uts/common/io/drm/drm_sarea.h
-
-are covered by the following copyrights/license text:
-
-/*
- * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/common/io/drm/drm_scatter.c
-usr/src/uts/i86pc/io/drm/i915_drv.c
-
-are covered by the following copyrights/license text:
-
-/*-
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/common/io/drm/drm_bufs.c
-usr/src/uts/common/io/drm/drm_context.c
-usr/src/uts/common/io/drm/drm_dma.c
-usr/src/uts/common/io/drm/drm_drv.c
-
-are covered by the following copyrights/license text:
-
-/*
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
---------------------------------------------------------------------------
-
-File:
-
-usr/src/uts/common/io/drm/queue.h
-
-is covered by the following copyrights/license text:
-
-/*-
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
---------------------------------------------------------------------------
-
-In addition to a Sun copyright, the following files:
-
-usr/src/uts/i86pc/io/drm/i915_dma.c
-usr/src/uts/i86pc/io/drm/i915_drm.h
-usr/src/uts/i86pc/io/drm/i915_drv.h
-usr/src/uts/i86pc/io/drm/i915_irq.c
-usr/src/uts/i86pc/io/drm/i915_mem.c
-
-are covered by the following copyrights/license text:
-
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
---------------------------------------------------------------------------
-
-File:
-
-usr/src/uts/common/io/drm/drm_pciids.txt
-
-is not covered by any copyright.
-
---------------------------------------------------------------------------
-
-All other files are covered by a Sun copyright and the CDDL:
-
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
diff --git a/usr/src/uts/common/io/drm/ati_pcigart.c b/usr/src/uts/common/io/drm/ati_pcigart.c
index 4c236c1..86a5987 100644
--- a/usr/src/uts/common/io/drm/ati_pcigart.c
+++ b/usr/src/uts/common/io/drm/ati_pcigart.c
@@ -1,7 +1,7 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
*/
+
/*
* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*-
* Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
@@ -41,6 +41,23 @@
#define ATI_PCIGART_TABLE_SIZE 32768
int
+/* LINTED */
+drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+{
+ drm_dma_handle_t *dmah;
+
+ if (dev->sg == NULL) {
+ DRM_ERROR("no scatter/gather memory!\n");
+ return (0);
+ }
+ dmah = dev->sg->dmah_gart;
+ dev->sg->dmah_gart = NULL;
+ if (dmah)
+ drm_pci_free(dmah);
+ return (1);
+}
+
+int
drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
unsigned long pages;
@@ -109,20 +126,3 @@ out:
return (1);
}
-
-/*ARGSUSED*/
-extern int
-drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
-{
- drm_dma_handle_t *dmah;
-
- if (dev->sg == NULL) {
- DRM_ERROR("no scatter/gather memory!\n");
- return (0);
- }
- dmah = dev->sg->dmah_gart;
- dev->sg->dmah_gart = NULL;
- if (dmah)
- drm_pci_free(dev, dmah);
- return (1);
-}
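
The hunk above moves drm_ati_pcigart_cleanup() ahead of drm_ati_pcigart_init() and drops the dev argument from the drm_pci_free() call, so the GART DMA handle is now freed on its own. The following stub is an illustrative sketch only, not part of the patch: drm_dma_handle_t and the single-argument drm_pci_free() prototype are assumed stand-ins for the real drmP.h declarations.

/*
 * Sketch of the new cleanup pattern (assumed declarations, not the real
 * drmP.h ones): detach the handle from dev->sg, then free it with the
 * single-argument drm_pci_free() introduced by this change.
 */
typedef struct drm_dma_handle drm_dma_handle_t;
extern void drm_pci_free(drm_dma_handle_t *dmah);

static void
example_free_gart_table(drm_dma_handle_t **dmahp)
{
	drm_dma_handle_t *dmah = *dmahp;

	*dmahp = NULL;			/* detach from dev->sg first */
	if (dmah != NULL)
		drm_pci_free(dmah);	/* new single-argument form */
}
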
diff --git a/usr/src/uts/common/io/drm/drm.h b/usr/src/uts/common/io/drm/drm.h
deleted file mode 100644
index 87af6ed..0000000
--- a/usr/src/uts/common/io/drm/drm.h
+++ /dev/null
@@ -1,865 +0,0 @@
-/* BEGIN CSTYLED */
-
-/**
- * \file drm.h
- * Header for the Direct Rendering Manager
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- *
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * \mainpage
- *
- * The Direct Rendering Manager (DRM) is a device-independent kernel-level
- * device driver that provides support for the XFree86 Direct Rendering
- * Infrastructure (DRI).
- *
- * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
- * ways:
- * -# The DRM provides synchronized access to the graphics hardware via
- * the use of an optimized two-tiered lock.
- * -# The DRM enforces the DRI security policy for access to the graphics
- * hardware by only allowing authenticated X11 clients access to
- * restricted regions of memory.
- * -# The DRM provides a generic DMA engine, complete with multiple
- * queues and the ability to detect the need for an OpenGL context
- * switch.
- * -# The DRM is extensible via the use of small device-specific modules
- * that rely extensively on the API exported by the DRM module.
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _DRM_H_
-#define _DRM_H_
-
-#include <sys/types32.h>
-
-#ifndef __user
-#define __user
-#endif
-
-#ifdef __GNUC__
-# define DEPRECATED __attribute__ ((deprecated))
-#else
-# define DEPRECATED
-# define __volatile__ volatile
-#endif
-
-#if defined(__linux__)
-#include <asm/ioctl.h> /* For _IO* macros */
-#define DRM_IOCTL_NR(n) _IOC_NR(n)
-#define DRM_IOC_VOID _IOC_NONE
-#define DRM_IOC_READ _IOC_READ
-#define DRM_IOC_WRITE _IOC_WRITE
-#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
-#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(IN_MODULE)
-/* Prevent name collision when including sys/ioccom.h */
-#undef ioctl
-#include <sys/ioccom.h>
-#define ioctl(a,b,c) xf86ioctl(a,b,c)
-#else
-#include <sys/ioccom.h>
-#endif /* __FreeBSD__ && xf86ioctl */
-#define DRM_IOCTL_NR(n) ((n) & 0xff)
-#define DRM_IOC_VOID IOC_VOID
-#define DRM_IOC_READ IOC_OUT
-#define DRM_IOC_WRITE IOC_IN
-#define DRM_IOC_READWRITE IOC_INOUT
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#endif
-
-/* Solaris-specific. */
-#if defined(__SOLARIS__) || defined(sun)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir, type, nr, size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used for X server compile */
-#if !defined(_KERNEL)
-#define _IO(type, nr) _IOC(_IOC_NONE, (type), (nr), 0)
-#define _IOR(type, nr, size) _IOC(_IOC_READ, (type), (nr), sizeof (size))
-#define _IOW(type, nr, size) _IOC(_IOC_WRITE, (type), (nr), sizeof (size))
-#define _IOWR(type, nr, size) _IOC(_IOC_READ|_IOC_WRITE, \
- (type), (nr), sizeof (size))
-
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-#endif /* _KERNEL */
-
-#define DRM_IOCTL_NR(n) _IOC_NR(n)
-#define DRM_IOC_VOID IOC_VOID
-#define DRM_IOC_READ IOC_OUT
-#define DRM_IOC_WRITE IOC_IN
-#define DRM_IOC_READWRITE IOC_INOUT
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-
-#endif /* __Solaris__ or sun */
-#define XFREE86_VERSION(major,minor,patch,snap) \
- ((major << 16) | (minor << 8) | patch)
-
-#ifndef CONFIG_XFREE86_VERSION
-#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
-#endif
-
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
-#define DRM_PROC_DEVICES "/proc/devices"
-#define DRM_PROC_MISC "/proc/misc"
-#define DRM_PROC_DRM "/proc/drm"
-#define DRM_DEV_DRM "/dev/drm"
-#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-#define DRM_DEV_UID 0
-#define DRM_DEV_GID 0
-#endif
-
-#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
-#ifdef __OpenBSD__
-#define DRM_MAJOR 81
-#endif
-#if defined(__linux__) || defined(__NetBSD__)
-#define DRM_MAJOR 226
-#endif
-#define DRM_MAX_MINOR 15
-#endif
-#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
-#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
-#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
-#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
-
-#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
-#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
-#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
-#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
-#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
-
-#if defined(__linux__)
-#if defined(__KERNEL__)
-typedef __u64 drm_u64_t;
-#else
-typedef unsigned long long drm_u64_t;
-#endif
-
-typedef unsigned int drm_handle_t;
-#else
-#include <sys/types.h>
-typedef uint64_t drm_u64_t;
-typedef unsigned long long drm_handle_t; /**< To mapped regions */
-#endif
-typedef unsigned int drm_context_t; /**< GLXContext handle */
-typedef unsigned int drm_drawable_t;
-typedef unsigned int drm_magic_t; /**< Magic for authentication */
-
-/**
- * Cliprect.
- *
- * \warning If you change this structure, make sure you change
- * XF86DRIClipRectRec in the server as well
- *
- * \note KW: Actually it's illegal to change either for
- * backwards-compatibility reasons.
- */
-typedef struct drm_clip_rect {
- unsigned short x1;
- unsigned short y1;
- unsigned short x2;
- unsigned short y2;
-} drm_clip_rect_t;
-
-/**
- * Drawable information.
- */
-typedef struct drm_drawable_info {
- unsigned int num_rects;
- drm_clip_rect_t *rects;
-} drm_drawable_info_t;
-
-/**
- * Texture region,
- */
-typedef struct drm_tex_region {
- unsigned char next;
- unsigned char prev;
- unsigned char in_use;
- unsigned char padding;
- unsigned int age;
-} drm_tex_region_t;
-
-/**
- * Hardware lock.
- *
- * The lock structure is a simple cache-line aligned integer. To avoid
- * processor bus contention on a multiprocessor system, there should not be any
- * other data stored in the same cache line.
- */
-typedef struct drm_hw_lock {
- __volatile__ unsigned int lock; /**< lock variable */
- char padding[60]; /**< Pad to cache line */
-} drm_hw_lock_t;
-
-/* This is beyond ugly, and only works on GCC. However, it allows me to use
- * drm.h in places (i.e., in the X-server) where I can't use size_t. The real
- * fix is to use uint32_t instead of size_t, but that fix will break existing
- * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems. That *will*
- * eventually happen, though. I chose 'unsigned long' to be the fallback type
- * because that works on all the platforms I know about. Hopefully, the
- * real fix will happen before that bites us.
- */
-
-#ifdef __SIZE_TYPE__
-# define DRM_SIZE_T __SIZE_TYPE__
-#else
-#if !defined(__SOLARIS__) && !defined(sun)
-# warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!"
-#endif
-# define DRM_SIZE_T unsigned long
-#endif
-
-/**
- * DRM_IOCTL_VERSION ioctl argument type.
- *
- * \sa drmGetVersion().
- */
-typedef struct drm_version {
- int version_major; /**< Major version */
- int version_minor; /**< Minor version */
- int version_patchlevel; /**< Patch level */
- DRM_SIZE_T name_len; /**< Length of name buffer */
- char __user *name; /**< Name of driver */
- DRM_SIZE_T date_len; /**< Length of date buffer */
- char __user *date; /**< User-space buffer to hold date */
- DRM_SIZE_T desc_len; /**< Length of desc buffer */
- char __user *desc; /**< User-space buffer to hold desc */
-} drm_version_t;
-
-/**
- * DRM_IOCTL_GET_UNIQUE ioctl argument type.
- *
- * \sa drmGetBusid() and drmSetBusId().
- */
-typedef struct drm_unique {
- DRM_SIZE_T unique_len; /**< Length of unique */
- char __user *unique; /**< Unique name for driver instantiation */
-} drm_unique_t;
-
-#undef DRM_SIZE_T
-
-typedef struct drm_list {
- int count; /**< Length of user-space structures */
- drm_version_t __user *version;
-} drm_list_t;
-
-typedef struct drm_block {
- int unused;
-} drm_block_t;
-
-/**
- * DRM_IOCTL_CONTROL ioctl argument type.
- *
- * \sa drmCtlInstHandler() and drmCtlUninstHandler().
- */
-typedef struct drm_control {
- enum {
- DRM_ADD_COMMAND,
- DRM_RM_COMMAND,
- DRM_INST_HANDLER,
- DRM_UNINST_HANDLER
- } func;
- int irq;
-} drm_control_t;
-
-/**
- * Type of memory to map.
- */
-typedef enum drm_map_type {
- _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
- _DRM_REGISTERS = 1, /**< no caching, no core dump */
- _DRM_SHM = 2, /**< shared, cached */
- _DRM_AGP = 3, /**< AGP/GART */
- _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
- _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
- _DRM_TTM = 6
-} drm_map_type_t;
-
-/**
- * Memory mapping flags.
- */
-typedef enum drm_map_flags {
- _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
- _DRM_READ_ONLY = 0x02,
- _DRM_LOCKED = 0x04, /**< shared, cached, locked */
- _DRM_KERNEL = 0x08, /**< kernel requires access */
- _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
- _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
- _DRM_REMOVABLE = 0x40, /**< Removable mapping */
- _DRM_DRIVER = 0x80 /**< Managed by driver */
-} drm_map_flags_t;
-
-typedef struct drm_ctx_priv_map {
- unsigned int ctx_id; /**< Context requesting private mapping */
- void *handle; /**< Handle of map */
-} drm_ctx_priv_map_t;
-
-/**
- * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
- * argument type.
- *
- * \sa drmAddMap().
- */
-typedef struct drm_map {
- unsigned long long offset; /**< Requested physical address (0 for SAREA)*/
- unsigned long long handle;
- /**< User-space: "Handle" to pass to mmap() */
- /**< Kernel-space: kernel-virtual address */
- unsigned long size; /**< Requested physical size (bytes) */
- drm_map_type_t type; /**< Type of memory to map */
- drm_map_flags_t flags; /**< Flags */
- int mtrr; /**< MTRR slot used */
- /* Private data */
-} drm_map_t;
-
-/**
- * DRM_IOCTL_GET_CLIENT ioctl argument type.
- */
-typedef struct drm_client {
- int idx; /**< Which client desired? */
- int auth; /**< Is client authenticated? */
- unsigned long pid; /**< Process ID */
- unsigned long uid; /**< User ID */
- unsigned long magic; /**< Magic */
- unsigned long iocs; /**< Ioctl count */
-} drm_client_t;
-
-typedef enum {
- _DRM_STAT_LOCK,
- _DRM_STAT_OPENS,
- _DRM_STAT_CLOSES,
- _DRM_STAT_IOCTLS,
- _DRM_STAT_LOCKS,
- _DRM_STAT_UNLOCKS,
- _DRM_STAT_VALUE, /**< Generic value */
- _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
- _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
-
- _DRM_STAT_IRQ, /**< IRQ */
- _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
- _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
- _DRM_STAT_DMA, /**< DMA */
- _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
- _DRM_STAT_MISSED /**< Missed DMA opportunity */
- /* Add to the *END* of the list */
-} drm_stat_type_t;
-
-/**
- * DRM_IOCTL_GET_STATS ioctl argument type.
- */
-typedef struct drm_stats {
- unsigned long count;
- struct {
- unsigned long value;
- drm_stat_type_t type;
- } data[15];
-} drm_stats_t;
-
-/**
- * Hardware locking flags.
- */
-typedef enum drm_lock_flags {
- _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
- _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
- _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
- _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
- /* These *HALT* flags aren't supported yet
- -- they will be used to support the
- full-screen DGA-like mode. */
- _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
- _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
-} drm_lock_flags_t;
-
-/**
- * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
- *
- * \sa drmGetLock() and drmUnlock().
- */
-typedef struct drm_lock {
- int context;
- drm_lock_flags_t flags;
-} drm_lock_t;
-
-/**
- * DMA flags
- *
- * \warning
- * These values \e must match xf86drm.h.
- *
- * \sa drm_dma.
- */
-typedef enum drm_dma_flags {
- /* Flags for DMA buffer dispatch */
- _DRM_DMA_BLOCK = 0x01, /**<
- * Block until buffer dispatched.
- *
- * \note The buffer may not yet have
- * been processed by the hardware --
- * getting a hardware lock with the
- * hardware quiescent will ensure
- * that the buffer has been
- * processed.
- */
- _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
- _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
-
- /* Flags for DMA buffer request */
- _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
- _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
- _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
-} drm_dma_flags_t;
-
-/**
- * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
- *
- * \sa drmAddBufs().
- */
-typedef enum {
- _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
- _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
- _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
- _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
- _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
-} drm_buf_flag;
-typedef struct drm_buf_desc {
- int count; /**< Number of buffers of this size */
- int size; /**< Size in bytes */
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
- drm_buf_flag flags;
- unsigned long agp_start; /**<
- * Start address of where the AGP buffers are
- * in the AGP aperture
- */
-} drm_buf_desc_t;
-
-/**
- * DRM_IOCTL_INFO_BUFS ioctl argument type.
- */
-typedef struct drm_buf_info {
- int count; /**< Number of buffers described in list */
- drm_buf_desc_t __user *list; /**< List of buffer descriptions */
-} drm_buf_info_t;
-
-/**
- * DRM_IOCTL_FREE_BUFS ioctl argument type.
- */
-typedef struct drm_buf_free {
- int count;
- int __user *list;
-} drm_buf_free_t;
-
-/**
- * Buffer information
- *
- * \sa drm_buf_map.
- */
-typedef struct drm_buf_pub {
- int idx; /**< Index into the master buffer list */
- int total; /**< Buffer size */
- int used; /**< Amount of buffer in use (for DMA) */
- void __user *address; /**< Address of buffer */
-} drm_buf_pub_t;
-
-/**
- * DRM_IOCTL_MAP_BUFS ioctl argument type.
- */
-typedef struct drm_buf_map {
- int count; /**< Length of the buffer list */
-#if defined(__cplusplus)
- void __user *c_virtual;
-#else
- void __user *virtual; /**< Mmap'd area in user-virtual */
-#endif
- drm_buf_pub_t __user *list; /**< Buffer information */
- int fd;
-} drm_buf_map_t;
-
-/**
- * DRM_IOCTL_DMA ioctl argument type.
- *
- * Indices here refer to the offset into the buffer list in drm_buf_get.
- *
- * \sa drmDMA().
- */
-typedef struct drm_dma {
- int context; /**< Context handle */
- int send_count; /**< Number of buffers to send */
- int __user *send_indices; /**< List of handles to buffers */
- int __user *send_sizes; /**< Lengths of data to send */
- drm_dma_flags_t flags; /**< Flags */
- int request_count; /**< Number of buffers requested */
- int request_size; /**< Desired size for buffers */
- int __user *request_indices; /**< Buffer information */
- int __user *request_sizes;
- int granted_count; /**< Number of buffers granted */
-} drm_dma_t;
-
-typedef enum {
- _DRM_CONTEXT_PRESERVED = 0x01,
- _DRM_CONTEXT_2DONLY = 0x02
-} drm_ctx_flags_t;
-
-/**
- * DRM_IOCTL_ADD_CTX ioctl argument type.
- *
- * \sa drmCreateContext() and drmDestroyContext().
- */
-typedef struct drm_ctx {
- drm_context_t handle;
- drm_ctx_flags_t flags;
-} drm_ctx_t;
-
-/**
- * DRM_IOCTL_RES_CTX ioctl argument type.
- */
-typedef struct drm_ctx_res {
- int count;
- drm_ctx_t __user *contexts;
-} drm_ctx_res_t;
-
-
-/**
- * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
- */
-typedef struct drm_draw {
- drm_drawable_t handle;
-} drm_draw_t;
-
-/**
- * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
- */
-typedef enum {
- DRM_DRAWABLE_CLIPRECTS,
-} drm_drawable_info_type_t;
-
-typedef struct drm_update_draw {
- drm_drawable_t handle;
- unsigned int type;
- unsigned int num;
- unsigned long long data;
-} drm_update_draw_t;
-
-/**
- * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
- */
-typedef struct drm_auth {
- drm_magic_t magic;
-} drm_auth_t;
-
-/**
- * DRM_IOCTL_IRQ_BUSID ioctl argument type.
- *
- * \sa drmGetInterruptFromBusID().
- */
-typedef struct drm_irq_busid {
- int irq; /**< IRQ number */
- int busnum; /**< bus number */
- int devnum; /**< device number */
- int funcnum; /**< function number */
-} drm_irq_busid_t;
-
-typedef enum {
- _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
- _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
- _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
- _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
- _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
- _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
-} drm_vblank_seq_type_t;
-
-#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
- _DRM_VBLANK_NEXTONMISS)
-
-struct drm_wait_vblank_request {
- drm_vblank_seq_type_t type;
- unsigned int sequence;
- unsigned long signal;
-};
-
-struct drm_wait_vblank_reply {
- drm_vblank_seq_type_t type;
- unsigned int sequence;
- long tval_sec;
- long tval_usec;
-};
-
-/**
- * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
- *
- * \sa drmWaitVBlank().
- */
-typedef union drm_wait_vblank {
- struct drm_wait_vblank_request request;
- struct drm_wait_vblank_reply reply;
-} drm_wait_vblank_t;
-
-#define _DRM_PRE_MODESET 1
-#define _DRM_POST_MODESET 2
-
-/**
- * DRM_IOCTL_MODESET_CTL ioctl argument type
- *
- * \sa drmModesetCtl().
- */
-typedef struct drm_modeset_ctl {
- uint32_t crtc;
- uint32_t cmd;
-} drm_modeset_ctl_t;
-
-/**
- * DRM_IOCTL_AGP_ENABLE ioctl argument type.
- *
- * \sa drmAgpEnable().
- */
-typedef struct drm_agp_mode {
- unsigned long mode; /**< AGP mode */
-} drm_agp_mode_t;
-
-/**
- * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
- *
- * \sa drmAgpAlloc() and drmAgpFree().
- */
-typedef struct drm_agp_buffer {
- unsigned long size; /**< In bytes -- will round to page boundary */
- unsigned long handle; /**< Used for binding / unbinding */
- unsigned long type; /**< Type of memory to allocate */
- unsigned long physical; /**< Physical used by i810 */
-} drm_agp_buffer_t;
-
-/**
- * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
- *
- * \sa drmAgpBind() and drmAgpUnbind().
- */
-typedef struct drm_agp_binding {
- unsigned long handle; /**< From drm_agp_buffer */
- unsigned long offset; /**< In bytes -- will round to page boundary */
-} drm_agp_binding_t;
-
-/**
- * DRM_IOCTL_AGP_INFO ioctl argument type.
- *
- * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
- * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
- * drmAgpVendorId() and drmAgpDeviceId().
- */
-typedef struct drm_agp_info {
- int agp_version_major;
- int agp_version_minor;
- unsigned long mode;
- unsigned long aperture_base; /**< physical address */
- unsigned long aperture_size; /**< bytes */
- unsigned long memory_allowed; /**< bytes */
- unsigned long memory_used;
-
- /** \name PCI information */
- /*@{ */
- unsigned short id_vendor;
- unsigned short id_device;
- /*@} */
-} drm_agp_info_t;
-
-/**
- * DRM_IOCTL_SG_ALLOC ioctl argument type.
- */
-typedef struct drm_scatter_gather {
- unsigned long size; /**< In bytes -- will round to page boundary */
- unsigned long handle; /**< Used for mapping / unmapping */
-} drm_scatter_gather_t;
-
-/**
- * DRM_IOCTL_SET_VERSION ioctl argument type.
- */
-typedef struct drm_set_version {
- int drm_di_major;
- int drm_di_minor;
- int drm_dd_major;
- int drm_dd_minor;
-} drm_set_version_t;
-
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
-typedef struct drm_gem_close {
- /** Handle of the object to be closed. */
- uint32_t handle;
- uint32_t pad;
-} drm_gem_close_t;
-
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
-typedef struct drm_gem_flink {
- /** Handle for the object being named */
- uint32_t handle;
-
- /** Returned global name */
- uint32_t name;
-} drm_gem_flink_t;
-
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
-typedef struct drm_gem_open {
- /** Name of object being opened */
- uint32_t name;
-
- /** Returned handle for the object */
- uint32_t handle;
-
- /** Returned size of the object */
- uint64_t size;
-} drm_gem_open_t;
-
-/**
- * \name Ioctls Definitions
- */
-/*@{*/
-
-#define DRM_IOCTL_BASE 'd'
-#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
-#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
-#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
-#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
-
-#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
-#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t)
-#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
-#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
-#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t)
-#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, drm_modeset_ctl_t)
-#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, drm_gem_close_t)
-#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, drm_gem_flink_t)
-#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, drm_gem_open_t)
-
-#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
-
-#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
-
-#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
-#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
-
-#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
-
-#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
-#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
-
-#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t)
-#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t)
-
-#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t)
-
-#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t)
-/*@}*/
-
-/**
- * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x99.
- * Generic IOCTLS restart at 0xA0.
- *
- * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
- * drmCommandReadWrite().
- */
-#define DRM_COMMAND_BASE 0x40
-#define DRM_COMMAND_END 0xA0
-
-#endif /* _DRM_H_ */
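
The removed drm.h above spelled out the Solaris _IOC bit layout (8 command-number bits, 8 type bits, 14 size bits, 2 direction bits) and the convention that device-specific ioctls occupy 0x40-0x99 above DRM_COMMAND_BASE. The standalone program below is an illustrative sketch, not part of the patch: it re-creates that packing locally to show how a command number is composed. EXAMPLE_GET_MAGIC, EXAMPLE_MY_DRV_FOO and the hard-coded payload sizes are hypothetical names chosen for the example; the shift widths and DRM_COMMAND_BASE value are taken from the header text removed above.

/*
 * Illustrative only: recompute a DRM ioctl command number the way the
 * removed Solaris-specific _IOC macros did.
 */
#include <stdio.h>

#define _IOC_NRBITS	8
#define _IOC_TYPEBITS	8
#define _IOC_SIZEBITS	14

#define _IOC_NRSHIFT	0
#define _IOC_TYPESHIFT	(_IOC_NRSHIFT + _IOC_NRBITS)
#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT + _IOC_TYPEBITS)
#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT + _IOC_SIZEBITS)

#define _IOC_WRITE	1U
#define _IOC_READ	2U

#define _IOC(dir, type, nr, size) \
	(((dir) << _IOC_DIRSHIFT) | \
	((type) << _IOC_TYPESHIFT) | \
	((nr) << _IOC_NRSHIFT) | \
	((size) << _IOC_SIZESHIFT))

#define DRM_IOCTL_BASE		'd'
#define DRM_COMMAND_BASE	0x40

/* Generic ioctl 0x02 (GET_MAGIC): kernel writes a 4-byte magic back out. */
#define EXAMPLE_GET_MAGIC	_IOC(_IOC_READ, DRM_IOCTL_BASE, 0x02, 4)

/* Hypothetical driver-private ioctl: slot 0 of the 0x40-0x99 device range. */
#define EXAMPLE_MY_DRV_FOO \
	_IOC(_IOC_READ | _IOC_WRITE, DRM_IOCTL_BASE, DRM_COMMAND_BASE + 0x00, 8)

int
main(void)
{
	printf("GET_MAGIC  = 0x%08x\n", (unsigned int)EXAMPLE_GET_MAGIC);
	printf("MY_DRV_FOO = 0x%08x\n", (unsigned int)EXAMPLE_MY_DRV_FOO);
	return (0);
}

Running it prints GET_MAGIC = 0x80046402, i.e. direction bits 0x2 (read), size 4, type 'd' (0x64), command number 0x02, which is how the DRM_IOR(0x02, drm_auth_t) definition in the removed header resolves on a 4-byte drm_auth_t.
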
diff --git a/usr/src/uts/common/io/drm/drmP.h b/usr/src/uts/common/io/drm/drmP.h
deleted file mode 100644
index 16c02e5..0000000
--- a/usr/src/uts/common/io/drm/drmP.h
+++ /dev/null
@@ -1,1103 +0,0 @@
-/*
- * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
- */
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-
-/*
- * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _DRMP_H
-#define _DRMP_H
-
-#include <sys/sysmacros.h>
-#include <sys/types.h>
-#include <sys/conf.h>
-#include <sys/modctl.h>
-#include <sys/stat.h>
-#include <sys/file.h>
-#include <sys/cmn_err.h>
-#include <sys/varargs.h>
-#include <sys/pci.h>
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <sys/sunldi.h>
-#include <sys/pmem.h>
-#include <sys/agpgart.h>
-#include <sys/time.h>
-#include <sys/sysmacros.h>
-#include "drm_atomic.h"
-#include "drm.h"
-#include "queue.h"
-#include "drm_linux_list.h"
-
-#ifndef __inline__
-#define __inline__ inline
-#endif
-
-#if !defined(__FUNCTION__)
-#if defined(C99)
-#define __FUNCTION__ __func__
-#else
-#define __FUNCTION__ " "
-#endif
-#endif
-
-/* DRM space units */
-#define DRM_PAGE_SHIFT PAGESHIFT
-#define DRM_PAGE_SIZE (1 << DRM_PAGE_SHIFT)
-#define DRM_PAGE_OFFSET (DRM_PAGE_SIZE - 1)
-#define DRM_PAGE_MASK ~(DRM_PAGE_SIZE - 1)
-#define DRM_MB2PAGES(x) ((x) << 8)
-#define DRM_PAGES2BYTES(x) ((x) << DRM_PAGE_SHIFT)
-#define DRM_BYTES2PAGES(x) ((x) >> DRM_PAGE_SHIFT)
-#define DRM_PAGES2KB(x) ((x) << 2)
-#define DRM_ALIGNED(offset) (((offset) & DRM_PAGE_OFFSET) == 0)
-
-#define PAGE_SHIFT DRM_PAGE_SHIFT
-#define PAGE_SIZE DRM_PAGE_SIZE
-
-#define DRM_MAX_INSTANCES 8
-#define DRM_DEVNODE "drm"
-#define DRM_UNOPENED 0
-#define DRM_OPENED 1
-
-#define DRM_HASH_SIZE 16 /* Size of key hash table */
-#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
-#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
-
-#define DRM_MEM_DMA 0
-#define DRM_MEM_SAREA 1
-#define DRM_MEM_DRIVER 2
-#define DRM_MEM_MAGIC 3
-#define DRM_MEM_IOCTLS 4
-#define DRM_MEM_MAPS 5
-#define DRM_MEM_BUFS 6
-#define DRM_MEM_SEGS 7
-#define DRM_MEM_PAGES 8
-#define DRM_MEM_FILES 9
-#define DRM_MEM_QUEUES 10
-#define DRM_MEM_CMDS 11
-#define DRM_MEM_MAPPINGS 12
-#define DRM_MEM_BUFLISTS 13
-#define DRM_MEM_DRMLISTS 14
-#define DRM_MEM_TOTALDRM 15
-#define DRM_MEM_BOUNDDRM 16
-#define DRM_MEM_CTXBITMAP 17
-#define DRM_MEM_STUB 18
-#define DRM_MEM_SGLISTS 19
-#define DRM_MEM_AGPLISTS 20
-#define DRM_MEM_CTXLIST 21
-#define DRM_MEM_MM 22
-#define DRM_MEM_HASHTAB 23
-#define DRM_MEM_OBJECTS 24
-
-#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
-#define DRM_MAP_HASH_OFFSET 0x10000000
-#define DRM_MAP_HASH_ORDER 12
-#define DRM_OBJECT_HASH_ORDER 12
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#define DRM_MM_INIT_MAX_PAGES 256
-
-
-/* Internal types and structures */
-#define DRM_ARRAY_SIZE(x) (sizeof (x) / sizeof (x[0]))
-#define DRM_MIN(a, b) ((a) < (b) ? (a) : (b))
-#define DRM_MAX(a, b) ((a) > (b) ? (a) : (b))
-
-#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-
-#define __OS_HAS_AGP 1
-
-#define DRM_DEV_MOD (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-#define DRM_DEV_UID 0
-#define DRM_DEV_GID 0
-
-#define DRM_CURRENTPID ddi_get_pid()
-#define DRM_SPINLOCK(l) mutex_enter(l)
-#define DRM_SPINUNLOCK(u) mutex_exit(u)
-#define DRM_SPINLOCK_ASSERT(l)
-#define DRM_LOCK() mutex_enter(&dev->dev_lock)
-#define DRM_UNLOCK() mutex_exit(&dev->dev_lock)
-#define DRM_LOCK_OWNED() ASSERT(mutex_owned(&dev->dev_lock))
-#define spin_lock_irqsave(l, flag) mutex_enter(l)
-#define spin_unlock_irqrestore(u, flag) mutex_exit(u)
-#define spin_lock(l) mutex_enter(l)
-#define spin_unlock(u) mutex_exit(u)
-
-
-#define DRM_UDELAY(sec) delay(drv_usectohz(sec *1000))
-#define DRM_MEMORYBARRIER()
-
-typedef struct drm_file drm_file_t;
-typedef struct drm_device drm_device_t;
-typedef struct drm_driver_info drm_driver_t;
-
-#define DRM_DEVICE drm_device_t *dev = dev1
-#define DRM_IOCTL_ARGS \
- drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
-
-#define DRM_COPYFROM_WITH_RETURN(dest, src, size) \
- if (ddi_copyin((src), (dest), (size), 0)) { \
- DRM_ERROR("%s: copy from user failed", __func__); \
- return (EFAULT); \
- }
-
-#define DRM_COPYTO_WITH_RETURN(dest, src, size) \
- if (ddi_copyout((src), (dest), (size), 0)) { \
- DRM_ERROR("%s: copy to user failed", __func__); \
- return (EFAULT); \
- }
-
-#define DRM_COPY_FROM_USER(dest, src, size) \
- ddi_copyin((src), (dest), (size), 0) /* flag for src */
-
-#define DRM_COPY_TO_USER(dest, src, size) \
- ddi_copyout((src), (dest), (size), 0) /* flags for dest */
-
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
- ddi_copyin((arg2), (arg1), (arg3), 0)
-
-#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
- ddi_copyout((arg2), arg1, arg3, 0)
-
-#define DRM_READ8(map, offset) \
- *(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset))
-#define DRM_READ16(map, offset) \
- *(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset))
-#define DRM_READ32(map, offset) \
- *(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset))
-#define DRM_WRITE8(map, offset, val) \
- *(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
-#define DRM_WRITE16(map, offset, val) \
- *(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
-#define DRM_WRITE32(map, offset, val) \
- *(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
-
-typedef struct drm_wait_queue {
- kcondvar_t cv;
- kmutex_t lock;
-}wait_queue_head_t;
-
-#define DRM_INIT_WAITQUEUE(q, pri) \
-{ \
- mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri); \
- cv_init(&(q)->cv, NULL, CV_DRIVER, NULL); \
-}
-
-#define DRM_FINI_WAITQUEUE(q) \
-{ \
- mutex_destroy(&(q)->lock); \
- cv_destroy(&(q)->cv); \
-}
-
-#define DRM_WAKEUP(q) \
-{ \
- mutex_enter(&(q)->lock); \
- cv_broadcast(&(q)->cv); \
- mutex_exit(&(q)->lock); \
-}
-
-#define jiffies ddi_get_lbolt()
-
-#define DRM_WAIT_ON(ret, q, timeout, condition) \
- mutex_enter(&(q)->lock); \
- while (!(condition)) { \
- ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
- TR_CLOCK_TICK); \
- if (ret == -1) { \
- ret = EBUSY; \
- break; \
- } else if (ret == 0) { \
- ret = EINTR; \
- break; \
- } else { \
- ret = 0; \
- } \
- } \
- mutex_exit(&(q)->lock);
-
-#define DRM_WAIT(ret, q, condition) \
-mutex_enter(&(q)->lock); \
-if (!(condition)) { \
- ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 30 * DRM_HZ); \
- if (ret == -1) { \
-			/* the gfx engine may have hung */ \
- if (!(condition)) \
- ret = -2; \
- } else { \
- ret = 0; \
- } \
-} \
-mutex_exit(&(q)->lock);
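/*
 * A sketch of the typical wait-queue lifecycle, assuming a hypothetical
 * example_done flag that an interrupt handler sets.  DRM_WAIT_ON() returns
 * 0 on success, EINTR if interrupted by a signal, and EBUSY on timeout.
 */
static wait_queue_head_t example_queue;
static volatile int example_done;

static void
example_queue_init(drm_device_t *dev)
{
	/* the priority must match the device interrupt, see DRM_INTR_PRI */
	DRM_INIT_WAITQUEUE(&example_queue, DRM_INTR_PRI(dev));
}

static void
example_irq_done(void)
{
	example_done = 1;
	DRM_WAKEUP(&example_queue);	/* wake all waiters */
}

static int
example_wait_for_done(void)
{
	int ret = 0;

	DRM_WAIT_ON(ret, &example_queue, DRM_HZ, example_done);
	return (ret);
}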
-
-
-#define DRM_GETSAREA() \
-{ \
- drm_local_map_t *map; \
- DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
- TAILQ_FOREACH(map, &dev->maplist, link) { \
- if (map->type == _DRM_SHM && \
- map->flags & _DRM_CONTAINS_LOCK) { \
- dev_priv->sarea = map; \
- break; \
- } \
- } \
-}
-
-#define LOCK_TEST_WITH_RETURN(dev, fpriv) \
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
- dev->lock.filp != fpriv) { \
- DRM_DEBUG("%s called without lock held", __func__); \
- return (EINVAL); \
- }
-
-#define DRM_IRQ_ARGS caddr_t arg
-#define IRQ_HANDLED DDI_INTR_CLAIMED
-#define IRQ_NONE DDI_INTR_UNCLAIMED
-
-enum {
- DRM_IS_NOT_AGP,
- DRM_IS_AGP,
- DRM_MIGHT_BE_AGP
-};
-
-/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
-#ifndef PCIY_AGP
-#define PCIY_AGP 0x02
-#endif
-
-#ifndef PCIY_EXPRESS
-#define PCIY_EXPRESS 0x10
-#endif
-
-#define PAGE_ALIGN(addr) (((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
-#define DRM_SUSER(p) (crgetsgid(p) == 0 || crgetsuid(p) == 0)
-
-#define DRM_GEM_OBJIDR_HASHNODE 1024
-#define idr_list_for_each(entry, head) \
- for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
- list_for_each(entry, &(head)->next[key])
-
-/*
- * wait for 400 milliseconds
- */
-#define DRM_HZ drv_usectohz(400000)
-
-typedef unsigned long dma_addr_t;
-typedef uint64_t u64;
-typedef uint32_t u32;
-typedef uint16_t u16;
-typedef uint8_t u8;
-typedef uint_t irqreturn_t;
-
-#define DRM_SUPPORT 1
-#define DRM_UNSUPPORT 0
-
-#define __OS_HAS_AGP 1
-
-typedef struct drm_pci_id_list
-{
- int vendor;
- int device;
- long driver_private;
- char *name;
-} drm_pci_id_list_t;
-
-#define DRM_AUTH 0x1
-#define DRM_MASTER 0x2
-#define DRM_ROOT_ONLY 0x4
-typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
-typedef struct drm_ioctl_desc {
- int (*func)(DRM_IOCTL_ARGS);
- int flags;
-} drm_ioctl_desc_t;
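/*
 * A sketch of a driver-private ioctl table using these access flags:
 * DRM_AUTH requires an authenticated client, DRM_MASTER the DRM master,
 * and DRM_ROOT_ONLY a privileged caller.  The drm_foo_* handlers are
 * hypothetical.
 */
extern int drm_foo_query_ioctl(DRM_IOCTL_ARGS);
extern int drm_foo_modeset_ioctl(DRM_IOCTL_ARGS);
extern int drm_foo_debug_ioctl(DRM_IOCTL_ARGS);

static drm_ioctl_desc_t drm_foo_ioctls[] = {
	{ drm_foo_query_ioctl,   DRM_AUTH },
	{ drm_foo_modeset_ioctl, DRM_AUTH | DRM_MASTER },
	{ drm_foo_debug_ioctl,   DRM_AUTH | DRM_ROOT_ONLY },
};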
-
-typedef struct drm_magic_entry {
- drm_magic_t magic;
- struct drm_file *priv;
- struct drm_magic_entry *next;
-} drm_magic_entry_t;
-
-typedef struct drm_magic_head {
- struct drm_magic_entry *head;
- struct drm_magic_entry *tail;
-} drm_magic_head_t;
-
-typedef struct drm_buf {
- int idx; /* Index into master buflist */
- int total; /* Buffer size */
- int order; /* log-base-2(total) */
- int used; /* Amount of buffer in use (for DMA) */
- unsigned long offset; /* Byte offset (used internally) */
- void *address; /* Address of buffer */
- unsigned long bus_address; /* Bus address of buffer */
- struct drm_buf *next; /* Kernel-only: used for free list */
- volatile int pending; /* On hardware DMA queue */
- drm_file_t *filp;
- /* Uniq. identifier of holding process */
- int context; /* Kernel queue for this buffer */
- enum {
- DRM_LIST_NONE = 0,
- DRM_LIST_FREE = 1,
- DRM_LIST_WAIT = 2,
- DRM_LIST_PEND = 3,
- DRM_LIST_PRIO = 4,
- DRM_LIST_RECLAIM = 5
- } list; /* Which list we're on */
-
-	int dev_priv_size;	/* Size of buffer private storage */
- void *dev_private; /* Per-buffer private storage */
-} drm_buf_t;
-
-typedef struct drm_freelist {
- int initialized; /* Freelist in use */
- uint32_t count; /* Number of free buffers */
- drm_buf_t *next; /* End pointer */
-
- int low_mark; /* Low water mark */
- int high_mark; /* High water mark */
-} drm_freelist_t;
-
-typedef struct drm_buf_entry {
- int buf_size;
- int buf_count;
- drm_buf_t *buflist;
- int seg_count;
- int page_order;
-
- uint32_t *seglist;
- unsigned long *seglist_bus;
-
- drm_freelist_t freelist;
-} drm_buf_entry_t;
-
-typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
-
-/* BEGIN CSTYLED */
-typedef struct drm_local_map {
- unsigned long offset; /* Physical address (0 for SAREA) */
- unsigned long size; /* Physical size (bytes) */
- drm_map_type_t type; /* Type of memory mapped */
- drm_map_flags_t flags; /* Flags */
- void *handle; /* User-space: "Handle" to pass to mmap */
- /* Kernel-space: kernel-virtual address */
- int mtrr; /* Boolean: MTRR used */
- /* Private data */
- int rid; /* PCI resource ID for bus_space */
- int kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
- caddr_t dev_addr; /* base device address */
- ddi_acc_handle_t dev_handle; /* The data access handle */
- ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free */
- TAILQ_ENTRY(drm_local_map) link;
-} drm_local_map_t;
-/* END CSTYLED */
-
-/*
- * This structure defines the drm_mm memory object, which will be used by the
- * DRM for its buffer objects.
- */
-struct drm_gem_object {
- /* Reference count of this object */
- atomic_t refcount;
-
- /* Handle count of this object. Each handle also holds a reference */
- atomic_t handlecount;
-
- /* Related drm device */
- struct drm_device *dev;
-
- int flink;
- /*
- * Size of the object, in bytes. Immutable over the object's
- * lifetime.
- */
- size_t size;
-
- /*
- * Global name for this object, starts at 1. 0 means unnamed.
- * Access is covered by the object_name_lock in the related drm_device
- */
- int name;
-
- /*
- * Memory domains. These monitor which caches contain read/write data
- * related to the object. When transitioning from one set of domains
- * to another, the driver is called to ensure that caches are suitably
- * flushed and invalidated
- */
- uint32_t read_domains;
- uint32_t write_domain;
-
- /*
- * While validating an exec operation, the
- * new read/write domain values are computed here.
- * They will be transferred to the above values
- * at the point that any cache flushing occurs
- */
- uint32_t pending_read_domains;
- uint32_t pending_write_domain;
-
- void *driver_private;
-
- drm_local_map_t *map;
- ddi_dma_handle_t dma_hdl;
- ddi_acc_handle_t acc_hdl;
- caddr_t kaddr;
- size_t real_size; /* real size of memory */
- pfn_t *pfnarray;
-};
-
-struct idr_list {
- struct idr_list *next, *prev;
- struct drm_gem_object *obj;
- uint32_t handle;
- caddr_t contain_ptr;
-};
-
-struct drm_file {
- TAILQ_ENTRY(drm_file) link;
- int authenticated;
- int master;
- int minor;
- pid_t pid;
- uid_t uid;
- int refs;
- drm_magic_t magic;
- unsigned long ioctl_count;
- void *driver_priv;
- /* Mapping of mm object handles to object pointers. */
- struct idr_list object_idr;
- /* Lock for synchronization of access to object_idr. */
- kmutex_t table_lock;
-
- dev_t dev;
- cred_t *credp;
-};
-
-typedef struct drm_lock_data {
- drm_hw_lock_t *hw_lock; /* Hardware lock */
- drm_file_t *filp;
- /* Uniq. identifier of holding process */
- kcondvar_t lock_cv; /* lock queue - SOLARIS Specific */
- kmutex_t lock_mutex; /* lock - SOLARIS Specific */
- unsigned long lock_time; /* Time of last lock in clock ticks */
-} drm_lock_data_t;
-
-/*
- * This structure, in drm_device_t, is always initialized while the device
- * is open. dev->dma_lock protects the incrementing of dev->buf_use, which
- * when set marks that no further bufs may be allocated until device teardown
- * occurs (when the last open of the device has closed). The high/low
- * watermarks of bufs are only touched by the X Server, and thus not
- * concurrently accessed, so no locking is needed.
- */
-typedef struct drm_device_dma {
- drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
- int buf_count;
-	drm_buf_t **buflist;	/* Vector of pointers into bufs */
- int seg_count;
- int page_count;
- unsigned long *pagelist;
- unsigned long byte_count;
- enum {
- _DRM_DMA_USE_AGP = 0x01,
- _DRM_DMA_USE_SG = 0x02
- } flags;
-} drm_device_dma_t;
-
-typedef struct drm_agp_mem {
- void *handle;
- unsigned long bound; /* address */
- int pages;
- caddr_t phys_addr;
- struct drm_agp_mem *prev;
- struct drm_agp_mem *next;
-} drm_agp_mem_t;
-
-typedef struct drm_agp_head {
- agp_info_t agp_info;
- const char *chipset;
- drm_agp_mem_t *memory;
- unsigned long mode;
- int enabled;
- int acquired;
- unsigned long base;
- int mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
- ldi_ident_t agpgart_li;
- ldi_handle_t agpgart_lh;
-} drm_agp_head_t;
-
-
-typedef struct drm_dma_handle {
- ddi_dma_handle_t dma_hdl;
- ddi_acc_handle_t acc_hdl;
- ddi_dma_cookie_t cookie;
- uint_t cookie_num;
- uintptr_t vaddr; /* virtual addr */
- uintptr_t paddr; /* physical addr */
- size_t real_sz; /* real size of memory */
-} drm_dma_handle_t;
-
-typedef struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- dma_addr_t *busaddr;
- ddi_umem_cookie_t *umem_cookie;
- drm_dma_handle_t *dmah_sg;
- drm_dma_handle_t *dmah_gart; /* Handle to PCI memory */
-} drm_sg_mem_t;
-
-/*
- * Generic memory manager structs
- */
-
-struct drm_mm_node {
- struct list_head fl_entry;
- struct list_head ml_entry;
- int free;
- unsigned long start;
- unsigned long size;
- struct drm_mm *mm;
- void *private;
-};
-
-struct drm_mm {
- struct list_head fl_entry;
- struct list_head ml_entry;
-};
-
-typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
-
-typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
-typedef struct drm_vbl_sig {
- TAILQ_ENTRY(drm_vbl_sig) link;
- unsigned int sequence;
- int signo;
- int pid;
-} drm_vbl_sig_t;
-
-
-/* used for clone device */
-typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
-typedef struct drm_cminor {
- TAILQ_ENTRY(drm_cminor) link;
- drm_file_t *fpriv;
- int minor;
-} drm_cminor_t;
-
-/* location of GART table */
-#define DRM_ATI_GART_MAIN 1
-#define DRM_ATI_GART_FB 2
-
-typedef struct ati_pcigart_info {
- int gart_table_location;
- int is_pcie;
- void *addr;
- dma_addr_t bus_addr;
- drm_local_map_t mapping;
-} drm_ati_pcigart_info;
-
-/* DRM device structure */
-struct drm_device;
-struct drm_driver_info {
- int (*load)(struct drm_device *, unsigned long);
- int (*firstopen)(struct drm_device *);
- int (*open)(struct drm_device *, drm_file_t *);
- void (*preclose)(struct drm_device *, drm_file_t *);
- void (*postclose)(struct drm_device *, drm_file_t *);
- void (*lastclose)(struct drm_device *);
- int (*unload)(struct drm_device *);
- void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
- int (*presetup)(struct drm_device *);
- int (*postsetup)(struct drm_device *);
- int (*open_helper)(struct drm_device *, drm_file_t *);
- void (*free_filp_priv)(struct drm_device *, drm_file_t *);
- void (*release)(struct drm_device *, void *);
- int (*dma_ioctl)(DRM_IOCTL_ARGS);
- void (*dma_ready)(struct drm_device *);
- int (*dma_quiescent)(struct drm_device *);
- int (*dma_flush_block_and_flush)(struct drm_device *,
- int, drm_lock_flags_t);
- int (*dma_flush_unblock)(struct drm_device *, int,
- drm_lock_flags_t);
- int (*context_ctor)(struct drm_device *, int);
- int (*context_dtor)(struct drm_device *, int);
- int (*kernel_context_switch)(struct drm_device *, int, int);
- int (*kernel_context_switch_unlock)(struct drm_device *);
- int (*device_is_agp) (struct drm_device *);
- int (*irq_preinstall)(struct drm_device *);
- void (*irq_postinstall)(struct drm_device *);
- void (*irq_uninstall)(struct drm_device *dev);
- uint_t (*irq_handler)(DRM_IRQ_ARGS);
- int (*vblank_wait)(struct drm_device *, unsigned int *);
- int (*vblank_wait2)(struct drm_device *, unsigned int *);
- /* added for intel minimized vblank */
- u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
- int (*enable_vblank)(struct drm_device *dev, int crtc);
- void (*disable_vblank)(struct drm_device *dev, int crtc);
-
- /*
- * Driver-specific constructor for drm_gem_objects, to set up
- * obj->driver_private.
- *
- * Returns 0 on success.
- */
- int (*gem_init_object) (struct drm_gem_object *obj);
- void (*gem_free_object) (struct drm_gem_object *obj);
-
-
- drm_ioctl_desc_t *driver_ioctls;
- int max_driver_ioctl;
-
- int buf_priv_size;
- int driver_major;
- int driver_minor;
- int driver_patchlevel;
- const char *driver_name; /* Simple driver name */
- const char *driver_desc; /* Longer driver name */
- const char *driver_date; /* Date of last major changes. */
-
- unsigned use_agp :1;
- unsigned require_agp :1;
- unsigned use_sg :1;
- unsigned use_dma :1;
- unsigned use_pci_dma :1;
- unsigned use_dma_queue :1;
- unsigned use_irq :1;
- unsigned use_vbl_irq :1;
- unsigned use_vbl_irq2 :1;
- unsigned use_mtrr :1;
- unsigned use_gem;
-};
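/*
 * A sketch of how a driver fills in drm_driver_info before handing it to
 * the generic layer at attach time.  The foo_* callbacks, version numbers
 * and feature bits are hypothetical.
 */
extern int foo_driver_load(struct drm_device *, unsigned long);
extern int foo_driver_unload(struct drm_device *);

static struct drm_driver_info foo_driver = {
	.load = foo_driver_load,
	.unload = foo_driver_unload,
	.driver_name = "foo",
	.driver_desc = "hypothetical example DRM driver",
	.driver_date = "20130101",
	.driver_major = 1,
	.driver_minor = 0,
	.driver_patchlevel = 0,
	.use_irq = 1,
	.use_gem = 1,
};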
-
-/*
- * Hardware-specific code needs to initialize mutexes that
- * can be used in interrupt context, so it needs to know
- * the interrupt priority.  The interrupt cookie in the
- * drm_device structure is the intr_block field.
- */
-#define DRM_INTR_PRI(dev) \
- DDI_INTR_PRI((dev)->intr_block)
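/*
 * A one-line sketch: initializing a mutex that is also taken from the
 * interrupt handler, using the interrupt priority derived above.
 */
static void
example_init_irq_lock(struct drm_device *dev)
{
	mutex_init(&dev->irq_lock, NULL, MUTEX_DRIVER, DRM_INTR_PRI(dev));
}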
-
-struct drm_device {
- drm_driver_t *driver;
- drm_cminor_list_t minordevs;
- dev_info_t *dip;
- void *drm_handle;
- int drm_supported;
- const char *desc; /* current driver description */
- kmutex_t *irq_mutex;
- kcondvar_t *irq_cv;
-
- ddi_iblock_cookie_t intr_block;
- uint32_t pci_device; /* PCI device id */
- uint32_t pci_vendor;
- char *unique; /* Unique identifier: e.g., busid */
- int unique_len; /* Length of unique field */
- int if_version; /* Highest interface version set */
- int flags; /* Flags to open(2) */
-
- /* Locks */
- kmutex_t vbl_lock; /* protects vblank operations */
- kmutex_t dma_lock; /* protects dev->dma */
- kmutex_t irq_lock; /* protects irq condition checks */
- kmutex_t dev_lock; /* protects everything else */
- drm_lock_data_t lock; /* Information on hardware lock */
- kmutex_t struct_mutex; /* < For others */
-
- /* Usage Counters */
- int open_count; /* Outstanding files open */
- int buf_use; /* Buffers in use -- cannot alloc */
-
- /* Performance counters */
- unsigned long counters;
- drm_stat_type_t types[15];
- uint32_t counts[15];
-
- /* Authentication */
- drm_file_list_t files;
- drm_magic_head_t magiclist[DRM_HASH_SIZE];
-
- /* Linked list of mappable regions. Protected by dev_lock */
- drm_map_list_t maplist;
-
- drm_local_map_t **context_sareas;
- int max_context;
-
- /* DMA queues (contexts) */
- drm_device_dma_t *dma; /* Optional pointer for DMA support */
-
- /* Context support */
- int irq; /* Interrupt used by board */
- int irq_enabled; /* True if the irq handler is enabled */
- int pci_domain;
- int pci_bus;
- int pci_slot;
- int pci_func;
- atomic_t context_flag; /* Context swapping flag */
- int last_context; /* Last current context */
-
- /* Only used for Radeon */
- atomic_t vbl_received;
- atomic_t vbl_received2;
-
- drm_vbl_sig_list_t vbl_sig_list;
- drm_vbl_sig_list_t vbl_sig_list2;
- /*
- * At load time, disabling the vblank interrupt won't be allowed since
- * old clients may not call the modeset ioctl and therefore misbehave.
- * Once the modeset ioctl *has* been called though, we can safely
- * disable them when unused.
- */
- int vblank_disable_allowed;
-
- wait_queue_head_t vbl_queue; /* vbl wait channel */
- /* vbl wait channel array */
- wait_queue_head_t *vbl_queues;
-
- /* number of VBLANK interrupts */
- /* (driver must alloc the right number of counters) */
- atomic_t *_vblank_count;
- /* signal list to send on VBLANK */
- struct drm_vbl_sig_list *vbl_sigs;
-
- /* number of signals pending on all crtcs */
- atomic_t vbl_signal_pending;
- /* number of users of vblank interrupts per crtc */
- atomic_t *vblank_refcount;
- /* protected by dev->vbl_lock, used for wraparound handling */
- u32 *last_vblank;
-	/* so we don't call enable more than once per disable */
- atomic_t *vblank_enabled;
- /* Display driver is setting mode */
- int *vblank_inmodeset;
- /* Don't wait while crtc is likely disabled */
- int *vblank_suspend;
- /* size of vblank counter register */
- u32 max_vblank_count;
- int num_crtcs;
- kmutex_t tasklet_lock;
- void (*locked_tasklet_func)(struct drm_device *dev);
-
- pid_t buf_pgid;
- drm_agp_head_t *agp;
- drm_sg_mem_t *sg; /* Scatter gather memory */
- uint32_t *ctx_bitmap;
- void *dev_private;
- unsigned int agp_buffer_token;
- drm_local_map_t *agp_buffer_map;
-
- kstat_t *asoft_ksp; /* kstat support */
-
-	/* \name Drawable information */
- kmutex_t drw_lock;
- unsigned int drw_bitfield_length;
- u32 *drw_bitfield;
- unsigned int drw_info_length;
- drm_drawable_info_t **drw_info;
-
- /* \name GEM information */
- /* @{ */
- kmutex_t object_name_lock;
- struct idr_list object_name_idr;
- atomic_t object_count;
- atomic_t object_memory;
- atomic_t pin_count;
- atomic_t pin_memory;
- atomic_t gtt_count;
- atomic_t gtt_memory;
- uint32_t gtt_total;
- uint32_t invalidate_domains; /* domains pending invalidation */
- uint32_t flush_domains; /* domains pending flush */
- /* @} */
-
- /*
- * Saving S3 context
- */
- void *s3_private;
-};
-
-/* Memory management support (drm_memory.c) */
-void drm_mem_init(void);
-void drm_mem_uninit(void);
-void *drm_alloc(size_t, int);
-void *drm_calloc(size_t, size_t, int);
-void *drm_realloc(void *, size_t, size_t, int);
-void drm_free(void *, size_t, int);
-int drm_ioremap(drm_device_t *, drm_local_map_t *);
-void drm_ioremapfree(drm_local_map_t *);
-
-void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
-void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);
-
-void drm_pci_free(drm_device_t *, drm_dma_handle_t *);
-void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);
-
-struct drm_local_map *drm_core_findmap(struct drm_device *, unsigned long);
-
-int drm_context_switch(drm_device_t *, int, int);
-int drm_context_switch_complete(drm_device_t *, int);
-int drm_ctxbitmap_init(drm_device_t *);
-void drm_ctxbitmap_cleanup(drm_device_t *);
-void drm_ctxbitmap_free(drm_device_t *, int);
-int drm_ctxbitmap_next(drm_device_t *);
-
-/* Locking IOCTL support (drm_lock.c) */
-int drm_lock_take(drm_lock_data_t *, unsigned int);
-int drm_lock_transfer(drm_device_t *,
- drm_lock_data_t *, unsigned int);
-int drm_lock_free(drm_device_t *,
- volatile unsigned int *, unsigned int);
-
-/* Buffer management support (drm_bufs.c) */
-unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
-unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
-int drm_initmap(drm_device_t *, unsigned long, unsigned long,
- unsigned int, int, int);
-void drm_rmmap(drm_device_t *, drm_local_map_t *);
-int drm_addmap(drm_device_t *, unsigned long, unsigned long,
- drm_map_type_t, drm_map_flags_t, drm_local_map_t **);
-int drm_order(unsigned long);
-
-/* DMA support (drm_dma.c) */
-int drm_dma_setup(drm_device_t *);
-void drm_dma_takedown(drm_device_t *);
-void drm_free_buffer(drm_device_t *, drm_buf_t *);
-void drm_reclaim_buffers(drm_device_t *, drm_file_t *);
-#define drm_core_reclaim_buffers drm_reclaim_buffers
-
-/* IRQ support (drm_irq.c) */
-int drm_irq_install(drm_device_t *);
-int drm_irq_uninstall(drm_device_t *);
-uint_t drm_irq_handler(DRM_IRQ_ARGS);
-void drm_driver_irq_preinstall(drm_device_t *);
-void drm_driver_irq_postinstall(drm_device_t *);
-void drm_driver_irq_uninstall(drm_device_t *);
-int drm_vblank_wait(drm_device_t *, unsigned int *);
-void drm_vbl_send_signals(drm_device_t *);
-void drm_handle_vblank(struct drm_device *dev, int crtc);
-u32 drm_vblank_count(struct drm_device *dev, int crtc);
-int drm_vblank_get(struct drm_device *dev, int crtc);
-void drm_vblank_put(struct drm_device *dev, int crtc);
-int drm_vblank_init(struct drm_device *dev, int num_crtcs);
-void drm_vblank_cleanup(struct drm_device *dev);
-int drm_modeset_ctl(DRM_IOCTL_ARGS);
-
-/* AGP/GART support (drm_agpsupport.c) */
-int drm_device_is_agp(drm_device_t *);
-int drm_device_is_pcie(drm_device_t *);
-drm_agp_head_t *drm_agp_init(drm_device_t *);
-void drm_agp_fini(drm_device_t *);
-int drm_agp_do_release(drm_device_t *);
-void *drm_agp_allocate_memory(size_t pages,
- uint32_t type, drm_device_t *dev);
-int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
-int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
-int drm_agp_unbind_memory(unsigned long, drm_device_t *);
-int drm_agp_bind_pages(drm_device_t *dev,
- pfn_t *pages,
- unsigned long num_pages,
- uint32_t gtt_offset);
-int drm_agp_unbind_pages(drm_device_t *dev,
- unsigned long num_pages,
- uint32_t gtt_offset,
- uint32_t type);
-void drm_agp_chipset_flush(struct drm_device *dev);
-void drm_agp_rebind(struct drm_device *dev);
-
-/* kstat support (drm_kstats.c) */
-int drm_init_kstats(drm_device_t *);
-void drm_fini_kstats(drm_device_t *);
-
-/* Scatter Gather Support (drm_scatter.c) */
-void drm_sg_cleanup(drm_device_t *, drm_sg_mem_t *);
-
-/* ATI PCIGART support (ati_pcigart.c) */
-int drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
-int drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);
-
-/* Locking IOCTL support (drm_drv.c) */
-int drm_lock(DRM_IOCTL_ARGS);
-int drm_unlock(DRM_IOCTL_ARGS);
-int drm_version(DRM_IOCTL_ARGS);
-int drm_setversion(DRM_IOCTL_ARGS);
-/* Cache management (drm_cache.c) */
-void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
-
-/* Misc. IOCTL support (drm_ioctl.c) */
-int drm_irq_by_busid(DRM_IOCTL_ARGS);
-int drm_getunique(DRM_IOCTL_ARGS);
-int drm_setunique(DRM_IOCTL_ARGS);
-int drm_getmap(DRM_IOCTL_ARGS);
-int drm_getclient(DRM_IOCTL_ARGS);
-int drm_getstats(DRM_IOCTL_ARGS);
-int drm_noop(DRM_IOCTL_ARGS);
-
-/* Context IOCTL support (drm_context.c) */
-int drm_resctx(DRM_IOCTL_ARGS);
-int drm_addctx(DRM_IOCTL_ARGS);
-int drm_modctx(DRM_IOCTL_ARGS);
-int drm_getctx(DRM_IOCTL_ARGS);
-int drm_switchctx(DRM_IOCTL_ARGS);
-int drm_newctx(DRM_IOCTL_ARGS);
-int drm_rmctx(DRM_IOCTL_ARGS);
-int drm_setsareactx(DRM_IOCTL_ARGS);
-int drm_getsareactx(DRM_IOCTL_ARGS);
-
-/* Drawable IOCTL support (drm_drawable.c) */
-int drm_adddraw(DRM_IOCTL_ARGS);
-int drm_rmdraw(DRM_IOCTL_ARGS);
-int drm_update_draw(DRM_IOCTL_ARGS);
-
-/* Authentication IOCTL support (drm_auth.c) */
-int drm_getmagic(DRM_IOCTL_ARGS);
-int drm_authmagic(DRM_IOCTL_ARGS);
-int drm_remove_magic(drm_device_t *, drm_magic_t);
-drm_file_t *drm_find_file(drm_device_t *, drm_magic_t);
-/* Buffer management support (drm_bufs.c) */
-int drm_addmap_ioctl(DRM_IOCTL_ARGS);
-int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
-int drm_addbufs_ioctl(DRM_IOCTL_ARGS);
-int drm_infobufs(DRM_IOCTL_ARGS);
-int drm_markbufs(DRM_IOCTL_ARGS);
-int drm_freebufs(DRM_IOCTL_ARGS);
-int drm_mapbufs(DRM_IOCTL_ARGS);
-
-/* DMA support (drm_dma.c) */
-int drm_dma(DRM_IOCTL_ARGS);
-
-/* IRQ support (drm_irq.c) */
-int drm_control(DRM_IOCTL_ARGS);
-int drm_wait_vblank(DRM_IOCTL_ARGS);
-
-/* AGP/GART support (drm_agpsupport.c) */
-int drm_agp_acquire(DRM_IOCTL_ARGS);
-int drm_agp_release(DRM_IOCTL_ARGS);
-int drm_agp_enable(DRM_IOCTL_ARGS);
-int drm_agp_info(DRM_IOCTL_ARGS);
-int drm_agp_alloc(DRM_IOCTL_ARGS);
-int drm_agp_free(DRM_IOCTL_ARGS);
-int drm_agp_unbind(DRM_IOCTL_ARGS);
-int drm_agp_bind(DRM_IOCTL_ARGS);
-
-/* Scatter Gather Support (drm_scatter.c) */
-int drm_sg_alloc(DRM_IOCTL_ARGS);
-int drm_sg_free(DRM_IOCTL_ARGS);
-
-/* drm_mm.c */
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
- unsigned long size, unsigned alignment);
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment, int best_match);
-
-extern void drm_mm_clean_ml(const struct drm_mm *mm);
-extern int drm_debug_flag;
-
-/* Functions supporting DRM_DEBUG, DRM_ERROR and DRM_INFO */
-extern void drm_debug(const char *fmt, ...);
-extern void drm_error(const char *fmt, ...);
-extern void drm_info(const char *fmt, ...);
-
-#ifdef DEBUG
-#define DRM_DEBUG if (drm_debug_flag >= 2) drm_debug
-#define DRM_INFO if (drm_debug_flag >= 1) drm_info
-#else
-#define DRM_DEBUG(...)
-#define DRM_INFO(...)
-#endif
-
-#define DRM_ERROR drm_error
-
-
-#define MAX_INSTNUMS 16
-
-extern int drm_dev_to_instance(dev_t);
-extern int drm_dev_to_minor(dev_t);
-extern void *drm_supp_register(dev_info_t *, drm_device_t *);
-extern int drm_supp_unregister(void *);
-
-extern int drm_open(drm_device_t *, drm_cminor_t *, int, int, cred_t *);
-extern int drm_close(drm_device_t *, int, int, int, cred_t *);
-extern int drm_attach(drm_device_t *);
-extern int drm_detach(drm_device_t *);
-extern int drm_probe(drm_device_t *, drm_pci_id_list_t *);
-
-extern int drm_pci_init(drm_device_t *);
-extern void drm_pci_end(drm_device_t *);
-extern int pci_get_info(drm_device_t *, int *, int *, int *);
-extern int pci_get_irq(drm_device_t *);
-extern int pci_get_vendor(drm_device_t *);
-extern int pci_get_device(drm_device_t *);
-
-extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *,
- drm_drawable_t);
-/* File Operations helpers (drm_fops.c) */
-extern drm_file_t *drm_find_file_by_proc(drm_device_t *, cred_t *);
-extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
-extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
- cred_t *);
-
-/* Graphics Execution Manager library functions (drm_gem.c) */
-int drm_gem_init(struct drm_device *dev);
-void drm_gem_object_free(struct drm_gem_object *obj);
-struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
- size_t size);
-void drm_gem_object_handle_free(struct drm_gem_object *obj);
-
-void drm_gem_object_reference(struct drm_gem_object *obj);
-void drm_gem_object_unreference(struct drm_gem_object *obj);
-
-int drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- int *handlep);
-void drm_gem_object_handle_reference(struct drm_gem_object *obj);
-
-void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
-
-struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
- int handle);
-int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
-int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
-int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
-void drm_gem_open(struct drm_file *file_private);
-void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-
-
-#endif /* _DRMP_H */
diff --git a/usr/src/uts/common/io/drm/drm_agpsupport.c b/usr/src/uts/common/io/drm/drm_agpsupport.c
index ae695da..272ab43 100644
--- a/usr/src/uts/common/io/drm/drm_agpsupport.c
+++ b/usr/src/uts/common/io/drm/drm_agpsupport.c
@@ -1,16 +1,19 @@
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
-/*
- * drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*-
- * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
*/
+
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,11 +34,6 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Author:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
*/
#include "drm.h"
@@ -55,449 +53,495 @@
*/
#define DRM_AGP_KEY_OFFSET 8
-extern int drm_supp_device_capability(void *handle, int capid);
-
-/*ARGSUSED*/
-int
-drm_device_is_agp(drm_device_t *dev)
+void drm_agp_cleanup(struct drm_device *dev)
{
- int ret;
-
- if (dev->driver->device_is_agp != NULL) {
- /*
- * device_is_agp returns a tristate:
- * 0 = not AGP;
- * 1 = definitely AGP;
- * 2 = fall back to PCI capability
- */
- ret = (*dev->driver->device_is_agp)(dev);
- if (ret != DRM_MIGHT_BE_AGP)
- return (ret);
- }
-
- return (drm_supp_device_capability(dev->drm_handle, PCIY_AGP));
+ struct drm_agp_head *agp = dev->agp;
+ (void) ldi_close(agp->agpgart_lh, FEXCL, kcred);
+ ldi_ident_release(agp->agpgart_li);
}
-/*ARGSUSED*/
-int
-drm_device_is_pcie(drm_device_t *dev)
-{
- return (drm_supp_device_capability(dev->drm_handle, PCIY_EXPRESS));
-}
-
-
-/*ARGSUSED*/
-int
-drm_agp_info(DRM_IOCTL_ARGS)
+/**
+ * Get AGP information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a (output) drm_agp_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
- DRM_DEVICE;
- agp_info_t *agpinfo;
- drm_agp_info_t info;
+ agp_info_t *agpinfo;
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
+ return -EINVAL;
agpinfo = &dev->agp->agp_info;
- info.agp_version_major = agpinfo->agpi_version.agpv_major;
- info.agp_version_minor = agpinfo->agpi_version.agpv_minor;
- info.mode = agpinfo->agpi_mode;
- info.aperture_base = agpinfo->agpi_aperbase;
- info.aperture_size = agpinfo->agpi_apersize* 1024 * 1024;
- info.memory_allowed = agpinfo->agpi_pgtotal << PAGE_SHIFT;
- info.memory_used = agpinfo->agpi_pgused << PAGE_SHIFT;
- info.id_vendor = agpinfo->agpi_devid & 0xffff;
- info.id_device = agpinfo->agpi_devid >> 16;
-
- DRM_COPYTO_WITH_RETURN((void *)data, &info, sizeof (info));
- return (0);
+ info->agp_version_major = agpinfo->agpi_version.agpv_major;
+ info->agp_version_minor = agpinfo->agpi_version.agpv_minor;
+ info->mode = agpinfo->agpi_mode;
+ info->aperture_base = agpinfo->agpi_aperbase;
+ info->aperture_size = agpinfo->agpi_apersize * 1024 * 1024;
+ info->memory_allowed = agpinfo->agpi_pgtotal << PAGE_SHIFT;
+ info->memory_used = agpinfo->agpi_pgused << PAGE_SHIFT;
+ info->id_vendor = agpinfo->agpi_devid & 0xffff;
+ info->id_device = agpinfo->agpi_devid >> 16;
+
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_agp_acquire(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_agp_info_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- int ret, rval;
+ struct drm_agp_info *info = data;
+ int err;
- if (!dev->agp) {
- DRM_ERROR("drm_agp_acquire : agp isn't initialized yet");
- return (ENODEV);
- }
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_ACQUIRE,
- (uintptr_t)0, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_acquired: AGPIOC_ACQUIRE failed\n");
- return (EIO);
- }
- dev->agp->acquired = 1;
+ err = drm_agp_info(dev, info);
+ if (err)
+ return err;
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_agp_release(DRM_IOCTL_ARGS)
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
{
- DRM_DEVICE;
- int ret, rval;
-
if (!dev->agp)
- return (ENODEV);
- if (!dev->agp->acquired)
- return (EBUSY);
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RELEASE,
- (intptr_t)0, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_release: AGPIOC_RELEASE failed\n");
- return (ENXIO);
+ return -ENODEV;
+ if (dev->agp->acquired)
+ return -EBUSY;
+ {
+ int ret, rval;
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_ACQUIRE,
+ (uintptr_t)0, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIOC_ACQUIRE failed");
+ return -ret;
+ }
}
- dev->agp->acquired = 0;
-
- return (ret);
+ dev->agp->acquired = 1;
+ return 0;
}
-
-int
-drm_agp_do_release(drm_device_t *dev)
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+/* LINTED */
+int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS)
{
- int ret, rval;
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RELEASE,
- (intptr_t)0, FKIOCTL, kcred, &rval);
-
- if (ret == 0)
- dev->agp->acquired = 0;
-
- return (ret);
+ return drm_agp_acquire((struct drm_device *) file->minor->dev);
}
-/*ARGSUSED*/
-int
-drm_agp_enable(DRM_IOCTL_ARGS)
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(struct drm_device * dev)
{
- DRM_DEVICE;
- drm_agp_mode_t modes;
- agp_setup_t setup;
- int ret, rval;
-
- if (!dev->agp)
- return (ENODEV);
- if (!dev->agp->acquired)
- return (EBUSY);
-
- DRM_COPYFROM_WITH_RETURN(&modes, (void *)data, sizeof (modes));
-
- dev->agp->mode = modes.mode;
- setup.agps_mode = (uint32_t)modes.mode;
-
+ if (!dev->agp || !dev->agp->acquired)
+ return -EINVAL;
+ {
+ int ret, rval;
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RELEASE,
+ (intptr_t)0, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIOC_RELEASE failed");
+ return -ret;
+ }
+ }
+ dev->agp->acquired = 0;
+ return 0;
+}
- DRM_DEBUG("drm_agp_enable: dev->agp->mode=%lx", modes.mode);
+/* LINTED */
+int drm_agp_release_ioctl(DRM_IOCTL_ARGS)
+{
+ return drm_agp_release(dev);
+}
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_SETUP,
- (intptr_t)&setup, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_enable: failed");
- return (EIO);
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+{
+ if (!dev->agp || !dev->agp->acquired)
+ return -EINVAL;
+
+ dev->agp->mode = mode.mode;
+ {
+ agp_setup_t setup;
+ int ret, rval;
+ setup.agps_mode = (uint32_t)mode.mode;
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_SETUP,
+ (intptr_t)&setup, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIOC_SETUP failed");
+ return -ret;
+ }
}
-
- dev->agp->base = dev->agp->agp_info.agpi_aperbase;
dev->agp->enabled = 1;
+ return 0;
+}
- DRM_DEBUG("drm_agp_enable: dev->agp->base=0x%lx", dev->agp->base);
- return (0);
+/* LINTED */
+int drm_agp_enable_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_agp_mode *mode = data;
+
+ return drm_agp_enable(dev, *mode);
}
-/*ARGSUSED*/
-int
-drm_agp_alloc(DRM_IOCTL_ARGS)
+/**
+ * Allocate AGP memory.
+ *
+ * \param inode device inode.
+ * \param file_priv file private pointer.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ */
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
- DRM_DEVICE;
- drm_agp_mem_t *entry;
- agp_allocate_t alloc;
- drm_agp_buffer_t request;
- int pages;
+ struct drm_agp_mem *entry;
+ agp_allocate_t alloc;
+ unsigned long pages;
int ret, rval;
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
+ return -EINVAL;
+ if (!(entry = kmalloc(sizeof(*entry), GFP_KERNEL)))
+ return -ENOMEM;
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
+ (void) memset(entry, 0, sizeof(*entry));
- entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
+ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- pages = btopr(request.size);
- alloc.agpa_pgcount = pages;
+ alloc.agpa_pgcount = (uint32_t) pages;
alloc.agpa_type = AGP_NORMAL;
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_ALLOCATE,
(intptr_t)&alloc, FKIOCTL, kcred, &rval);
if (ret) {
- DRM_ERROR("drm_agp_alloc: AGPIOC_ALLOCATE failed, ret=%d", ret);
- kmem_free(entry, sizeof (*entry));
- return (ret);
+ DRM_ERROR("AGPIOC_ALLOCATE failed");
+ kfree(entry, sizeof (*entry));
+ return -ret;
}
+ entry->handle = alloc.agpa_key + DRM_AGP_KEY_OFFSET;
entry->bound = 0;
- entry->pages = pages;
- entry->handle = (void*)(uintptr_t)(alloc.agpa_key + DRM_AGP_KEY_OFFSET);
- entry->prev = NULL;
- entry->phys_addr = (void*)(uintptr_t)alloc.agpa_physical;
- entry->next = dev->agp->memory;
- if (dev->agp->memory)
- dev->agp->memory->prev = entry;
- dev->agp->memory = entry;
-
- DRM_DEBUG("entry->phys_addr %lx", entry->phys_addr);
-
- /* physical is used only by i810 driver */
- request.physical = alloc.agpa_physical;
- request.handle = (unsigned long)entry->handle;
-
- /*
- * If failed to ddi_copyout(), we will free allocated AGP memory
- * when closing drm
- */
- DRM_COPYTO_WITH_RETURN((void *)data, &request, sizeof (request));
-
- return (0);
+ entry->pages = (int) pages;
+ list_add(&entry->head, &dev->agp->memory, (caddr_t)entry);
+
+ request->handle = entry->handle;
+ request->physical = alloc.agpa_physical;
+
+ return 0;
}
-/*ARGSUSED*/
-static drm_agp_mem_t *
-drm_agp_lookup_entry(drm_device_t *dev, void *handle)
+/* LINTED */
+int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_buffer *request = data;
- for (entry = dev->agp->memory; entry; entry = entry->next) {
+ return drm_agp_alloc(dev, request);
+}
+
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+ unsigned long handle)
+{
+ struct drm_agp_mem *entry;
+
+ list_for_each_entry(entry, struct drm_agp_mem, &dev->agp->memory, head) {
if (entry->handle == handle)
- return (entry);
+ return entry;
}
-
- return (NULL);
+ return NULL;
}
-/*ARGSUSED*/
-int
-drm_agp_unbind(DRM_IOCTL_ARGS)
+/**
+ * Unbind AGP memory from the GATT (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks-up the AGP memory
+ * entry and passes it to the unbind_agp() function.
+ */
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
- DRM_DEVICE;
- agp_unbind_t unbind;
- drm_agp_binding_t request;
- drm_agp_mem_t *entry;
- int ret, rval;
+ struct drm_agp_mem *entry;
+ int ret;
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
-
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
-
- if (!(entry = drm_agp_lookup_entry(dev, (void *)request.handle)))
- return (EINVAL);
+ return -EINVAL;
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+ return -EINVAL;
if (!entry->bound)
- return (EINVAL);
-
- unbind.agpu_pri = 0;
- unbind.agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
- (intptr_t)&unbind, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_unbind: AGPIOC_UNBIND failed");
- return (EIO);
+ return -EINVAL;
+ {
+ agp_unbind_t unbind;
+ int rval;
+ unbind.agpu_pri = 0;
+ unbind.agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
+ (intptr_t)&unbind, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIOC_UNBIND failed");
+ return -ret;
+ }
}
- entry->bound = 0;
- return (0);
+ entry->bound = 0;
+ return ret;
}
-/*ARGSUSED*/
-int
-drm_agp_bind(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_agp_binding_t request;
- drm_agp_mem_t *entry;
- int start;
- uint_t key;
-
- if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
+ struct drm_agp_binding *request = data;
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
+ return drm_agp_unbind(dev, request);
+}
- entry = drm_agp_lookup_entry(dev, (void *)request.handle);
- if (!entry || entry->bound)
- return (EINVAL);
+/**
+ * Bind AGP memory into the GATT (ioctl)
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that no memory
+ * is currently bound into the GATT. Looks-up the AGP memory entry and passes
+ * it to bind_agp() function.
+ */
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+ struct drm_agp_mem *entry;
+ int retcode;
+ int page;
- key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
- start = btopr(request.offset);
- if (drm_agp_bind_memory(key, start, dev)) {
- DRM_ERROR("drm_agp_bind: failed key=%x, start=0x%x, "
- "agp_base=0x%lx", key, start, dev->agp->base);
- return (EIO);
+ if (!dev->agp || !dev->agp->acquired)
+ return -EINVAL;
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+ return -EINVAL;
+ if (entry->bound)
+ return -EINVAL;
+ page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ {
+ uint_t key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+ if (retcode = drm_agp_bind_memory(key, page, dev)) {
+ DRM_ERROR("failed key=0x%x, page=0x%x, "
+ "agp_base=0x%lx", key, page, dev->agp->base);
+ return retcode;
+ }
}
+ entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+ DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+ dev->agp->base, entry->bound);
+ return 0;
+}
- entry->bound = dev->agp->base + (start << AGP_PAGE_SHIFT);
+/* LINTED */
+int drm_agp_bind_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_agp_binding *request = data;
- return (0);
+ return drm_agp_bind(dev, request);
}
-/*ARGSUSED*/
-int
-drm_agp_free(DRM_IOCTL_ARGS)
+/**
+ * Free AGP memory (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory is currently bound, unbind it via
+ * unbind_agp(). Free it via free_agp() as well as the entry itself,
+ * and unlink it from the doubly linked list it is inserted in.
+ */
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
- DRM_DEVICE;
- drm_agp_buffer_t request;
- drm_agp_mem_t *entry;
- int ret, rval;
- int agpu_key;
+ struct drm_agp_mem *entry;
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data, sizeof (request));
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
- if (!(entry = drm_agp_lookup_entry(dev, (void *)request.handle)))
- return (EINVAL);
+ return -EINVAL;
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+ return -EINVAL;
if (entry->bound)
- (void) drm_agp_unbind_memory(request.handle, dev);
-
- if (entry == dev->agp->memory)
- dev->agp->memory = entry->next;
- if (entry->prev)
- entry->prev->next = entry->next;
- if (entry->next)
- entry->next->prev = entry->prev;
-
- agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_DEALLOCATE,
- (intptr_t)agpu_key, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_free: AGPIOC_DEALLOCATE failed,"
- "akey=%d, ret=%d", agpu_key, ret);
- return (EIO);
+ (void) drm_agp_unbind_memory(request->handle, dev);
+
+ list_del(&entry->head);
+ {
+ int agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
+ int ret, rval;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_DEALLOCATE,
+ (intptr_t)agpu_key, FKIOCTL, kcred, &rval);
+ if (ret) {
+			DRM_ERROR("AGPIOC_DEALLOCATE failed, "
+			    "akey=%d, ret=%d", agpu_key, ret);
+ return -ret;
+ }
}
- drm_free(entry, sizeof (*entry), DRM_MEM_AGPLISTS);
- return (0);
+ kfree(entry, sizeof (*entry));
+ return 0;
}
-/*ARGSUSED*/
-drm_agp_head_t *
-drm_agp_init(drm_device_t *dev)
+/* LINTED */
+int drm_agp_free_ioctl(DRM_IOCTL_ARGS)
{
- drm_agp_head_t *agp = NULL;
- int retval, rval;
+ struct drm_agp_buffer *request = data;
+
+ return drm_agp_free(dev, request);
+}
- agp = kmem_zalloc(sizeof (drm_agp_head_t), KM_SLEEP);
+/**
+ * Initialize the AGP resources.
+ *
+ * \return pointer to a drm_agp_head structure.
+ *
+ * Opens the agpgart device through LDI and queries it for the AGP
+ * information. Creates and initializes a drm_agp_head structure.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+ struct drm_agp_head *head = NULL;
+ int ret, rval;
- retval = ldi_ident_from_dip(dev->dip, &agp->agpgart_li);
- if (retval != 0) {
- DRM_ERROR("drm_agp_init: failed to get layerd ident, retval=%d",
- retval);
+ if (!(head = kmalloc(sizeof(*head), GFP_KERNEL)))
+ return NULL;
+ (void) memset((void *)head, 0, sizeof(*head));
+ ret = ldi_ident_from_dip(dev->devinfo, &head->agpgart_li);
+ if (ret) {
+		DRM_ERROR("failed to get layered ident, ret=%d", ret);
goto err_1;
}
- retval = ldi_open_by_name(AGP_DEVICE, FEXCL, kcred,
- &agp->agpgart_lh, agp->agpgart_li);
- if (retval != 0) {
- DRM_ERROR("drm_agp_init: failed to open %s, retval=%d",
- AGP_DEVICE, retval);
+ ret = ldi_open_by_name(AGP_DEVICE, FEXCL, kcred,
+ &head->agpgart_lh, head->agpgart_li);
+ if (ret) {
+ DRM_ERROR("failed to open %s, ret=%d", AGP_DEVICE, ret);
goto err_2;
}
- retval = ldi_ioctl(agp->agpgart_lh, AGPIOC_INFO,
- (intptr_t)&agp->agp_info, FKIOCTL, kcred, &rval);
-
- if (retval != 0) {
- DRM_ERROR("drm_agp_init: failed to get agpinfo, retval=%d",
- retval);
+ ret = ldi_ioctl(head->agpgart_lh, AGPIOC_INFO,
+ (intptr_t)&head->agp_info, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("failed to get agpinfo, ret=%d", ret);
goto err_3;
}
-
- return (agp);
+ INIT_LIST_HEAD(&head->memory);
+ head->base = head->agp_info.agpi_aperbase;
+ return head;
err_3:
- (void) ldi_close(agp->agpgart_lh, FEXCL, kcred);
-
+ (void) ldi_close(head->agpgart_lh, FEXCL, kcred);
err_2:
- ldi_ident_release(agp->agpgart_li);
-
+ ldi_ident_release(head->agpgart_li);
err_1:
- kmem_free(agp, sizeof (drm_agp_head_t));
- return (NULL);
+ kfree(head, sizeof(*head));
+ return NULL;
}
-/*ARGSUSED*/
-void
-drm_agp_fini(drm_device_t *dev)
+/* LINTED */
+void *drm_agp_allocate_memory(size_t pages, uint32_t type, struct drm_device *dev)
{
- drm_agp_head_t *agp = dev->agp;
- (void) ldi_close(agp->agpgart_lh, FEXCL, kcred);
- ldi_ident_release(agp->agpgart_li);
- kmem_free(agp, sizeof (drm_agp_head_t));
- dev->agp = NULL;
+ return NULL;
}
-
-/*ARGSUSED*/
-void *
-drm_agp_allocate_memory(size_t pages, uint32_t type, drm_device_t *dev)
-{
- return (NULL);
-}
-
-/*ARGSUSED*/
-int
-drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev)
+/* LINTED */
+int drm_agp_free_memory(agp_allocate_t *handle, struct drm_device *dev)
{
- return (1);
+ return 1;
}
-/*ARGSUSED*/
-int
-drm_agp_bind_memory(unsigned int key, uint32_t start, drm_device_t *dev)
+int drm_agp_bind_memory(unsigned int key, uint32_t start, struct drm_device *dev)
{
agp_bind_t bind;
- int ret, rval;
+ int ret, rval;
bind.agpb_pgstart = start;
bind.agpb_key = key;
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_BIND,
- (intptr_t)&bind, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_DEBUG("drm_agp_bind_meory: AGPIOC_BIND failed");
- return (EIO);
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_BIND,
+ (intptr_t)&bind, FKIOCTL, kcred, &rval)) {
+ DRM_DEBUG("AGPIOC_BIND failed");
+ return -ret;
}
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_agp_unbind_memory(unsigned long handle, drm_device_t *dev)
+int drm_agp_unbind_memory(unsigned long handle, struct drm_device *dev)
{
+ struct drm_agp_mem *entry;
agp_unbind_t unbind;
- drm_agp_mem_t *entry;
int ret, rval;
if (!dev->agp || !dev->agp->acquired)
- return (EINVAL);
+ return -EINVAL;
- entry = drm_agp_lookup_entry(dev, (void *)handle);
+ entry = drm_agp_lookup_entry(dev, handle);
if (!entry || !entry->bound)
- return (EINVAL);
+ return -EINVAL;
unbind.agpu_pri = 0;
unbind.agpu_key = (uintptr_t)entry->handle - DRM_AGP_KEY_OFFSET;
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
- (intptr_t)&unbind, FKIOCTL, kcred, &rval);
- if (ret) {
- DRM_ERROR("drm_agp_unbind: AGPIO_UNBIND failed");
- return (EIO);
+ if (ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_UNBIND,
+ (intptr_t)&unbind, FKIOCTL, kcred, &rval)) {
+ DRM_ERROR("AGPIO_UNBIND failed");
+ return -ret;
}
entry->bound = 0;
- return (0);
+ return 0;
}
-/*
+/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
@@ -505,83 +549,85 @@ drm_agp_unbind_memory(unsigned long handle, drm_device_t *dev)
* caller to handle that.
*/
int
-drm_agp_bind_pages(drm_device_t *dev,
+drm_agp_bind_pages(struct drm_device *dev,
pfn_t *pages,
unsigned long num_pages,
- uint32_t gtt_offset)
+ uint32_t gtt_offset,
+ unsigned int agp_type)
{
+ agp_gtt_info_t bind;
+ int ret, rval;
- agp_bind_pages_t bind;
- int ret, rval;
+ bind.agp_pgstart = gtt_offset / AGP_PAGE_SIZE;
+ bind.agp_npage = num_pages;
+ bind.agp_phyaddr = pages;
+ bind.agp_flags = agp_type;
- bind.agpb_pgstart = gtt_offset / AGP_PAGE_SIZE;
- bind.agpb_pgcount = num_pages;
- bind.agpb_pages = pages;
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_BIND,
(intptr_t)&bind, FKIOCTL, kcred, &rval);
if (ret) {
DRM_ERROR("AGPIOC_PAGES_BIND failed ret %d", ret);
- return (ret);
+ return -ret;
}
- return (0);
+ return 0;
}
int
-drm_agp_unbind_pages(drm_device_t *dev,
+drm_agp_unbind_pages(struct drm_device *dev,
+ pfn_t *pages,
unsigned long num_pages,
uint32_t gtt_offset,
+ pfn_t scratch,
uint32_t type)
{
+ agp_gtt_info_t unbind;
+ int ret, rval;
- agp_unbind_pages_t unbind;
- int ret, rval;
+ unbind.agp_pgstart = gtt_offset / AGP_PAGE_SIZE;
+ unbind.agp_npage = num_pages;
+ unbind.agp_type = type;
+ unbind.agp_phyaddr = pages;
+ unbind.agp_scratch = scratch;
- unbind.agpb_pgstart = gtt_offset / AGP_PAGE_SIZE;
- unbind.agpb_pgcount = num_pages;
- unbind.agpb_type = type;
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_UNBIND,
(intptr_t)&unbind, FKIOCTL, kcred, &rval);
if (ret) {
- DRM_DEBUG("drm_agp_unbind_pages AGPIOC_PAGES_UNBIND failed");
- return (ret);
+ DRM_ERROR("AGPIOC_PAGES_UNBIND failed %d", ret);
+ return -ret;
}
- return (0);
+ return 0;
}
-/*
- * Certain Intel chipsets contains a global write buffer, and this can require
- * flushing from the drm or X.org to make sure all data has hit RAM before
- * initiating a GPU transfer, due to a lack of coherency with the integrated
- * graphics device and this buffer.
- */
-void
-drm_agp_chipset_flush(struct drm_device *dev)
+void drm_agp_chipset_flush(struct drm_device *dev)
{
int ret, rval;
- DRM_DEBUG("agp_chipset_flush");
ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_FLUSHCHIPSET,
(intptr_t)0, FKIOCTL, kcred, &rval);
- if (ret != 0) {
- DRM_ERROR("Failed to drm_agp_chipset_flush ret %d", ret);
- }
+ if (ret)
+ DRM_ERROR("AGPIOC_FLUSHCHIPSET failed, ret=%d", ret);
}
-/*
- * The pages are evict on suspend, so re-bind it at resume time
- */
-void
-drm_agp_rebind(struct drm_device *dev)
+int
+drm_agp_rw_gtt(struct drm_device *dev,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ void *gttp,
+ uint32_t type)
{
+ agp_rw_gtt_t gtt_info;
int ret, rval;
- if (!dev->agp) {
- return;
- }
-
- ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_PAGES_REBIND,
- (intptr_t)0, FKIOCTL, kcred, &rval);
- if (ret != 0) {
- DRM_ERROR("rebind failed %d", ret);
+ gtt_info.pgstart = gtt_offset / AGP_PAGE_SIZE;
+ gtt_info.pgcount = num_pages;
+ gtt_info.addr = gttp;
+ /* read = 0 write = 1 */
+ gtt_info.type = type;
+ ret = ldi_ioctl(dev->agp->agpgart_lh, AGPIOC_RW_GTT,
+ (intptr_t)&gtt_info, FKIOCTL, kcred, &rval);
+ if (ret) {
+ DRM_ERROR("AGPIOC_RW_GTT failed %d", ret);
+ return -ret;
}
+ return 0;
}
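/*
 * With this rework the *_ioctl() entry points are thin wrappers, so kernel
 * code can call the AGP helpers directly.  A rough sketch (error handling
 * abbreviated, example_agp_setup() is a hypothetical caller):
 */
static int
example_agp_setup(struct drm_device *dev)
{
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if ((ret = drm_agp_acquire(dev)) != 0)
		return (ret);
	if ((ret = drm_agp_info(dev, &info)) != 0)
		return (ret);
	mode.mode = info.mode;		/* keep the currently reported mode */
	return (drm_agp_enable(dev, mode));
}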
diff --git a/usr/src/uts/common/io/drm/drm_auth.c b/usr/src/uts/common/io/drm/drm_auth.c
index 23fec5e..c6c0a66 100644
--- a/usr/src/uts/common/io/drm/drm_auth.c
+++ b/usr/src/uts/common/io/drm/drm_auth.c
@@ -1,13 +1,22 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*/
/*
- * drm_auth.h -- IOCTLs for authentication -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
+
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -30,144 +39,199 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
-static int
-drm_hash_magic(drm_magic_t magic)
+/**
+ * Generate a hash key from a magic.
+ *
+ * \param magic magic.
+ * \return hash key.
+ *
+ * The key is the modulus of the hash table size, #DRM_HASH_SIZE, which must be
+ * a power of 2.
+ */
+static int drm_hash_magic(drm_magic_t magic)
{
- return (magic & (DRM_HASH_SIZE-1));
+ return magic & (DRM_HASH_SIZE - 1);
}
-drm_file_t *
-drm_find_file(drm_device_t *dev, drm_magic_t magic)
+/**
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches drm_device::magiclist, among all files with the same hash key,
+ * for the one with the matching magic number, while holding the
+ * drm_device::struct_mutex lock.
+ */
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
{
- drm_file_t *retval = NULL;
- drm_magic_entry_t *pt;
- int hash;
+ struct drm_file *retval = NULL;
+ struct drm_magic_entry *pt;
+ int hash = drm_hash_magic(magic);
+ struct drm_device *dev = master->minor->dev;
- hash = drm_hash_magic(magic);
- for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
+ mutex_lock(&dev->struct_mutex);
+ for (pt = master->magiclist[hash].head; pt; pt = pt->next) {
if (pt->magic == magic) {
retval = pt->priv;
break;
}
}
-
- return (retval);
+ mutex_unlock(&dev->struct_mutex);
+ return retval;
}
-static int
-drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
+/**
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends it to the linked list
+ * associated with the magic number hash key in drm_device::magiclist, while
+ * holding the drm_device::struct_mutex lock.
+ */
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
+ drm_magic_t magic)
{
- int hash;
- drm_magic_entry_t *entry;
+ int hash;
+ struct drm_magic_entry *entry;
+ struct drm_device *dev = master->minor->dev;
+
+ DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
- entry = drm_alloc(sizeof (*entry), DRM_MEM_MAGIC);
+ entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
if (!entry)
- return (ENOMEM);
+ return -ENOMEM;
+ (void) memset(entry, 0, sizeof(*entry));
entry->magic = magic;
- entry->priv = priv;
- entry->next = NULL;
+ entry->priv = priv;
+ entry->next = NULL;
- DRM_LOCK();
- if (dev->magiclist[hash].tail) {
- dev->magiclist[hash].tail->next = entry;
- dev->magiclist[hash].tail = entry;
+ mutex_lock(&dev->struct_mutex);
+ if (master->magiclist[hash].tail) {
+ master->magiclist[hash].tail->next = entry;
+ master->magiclist[hash].tail = entry;
} else {
- dev->magiclist[hash].head = entry;
- dev->magiclist[hash].tail = entry;
+ master->magiclist[hash].head = entry;
+ master->magiclist[hash].tail = entry;
}
- DRM_UNLOCK();
+ mutex_unlock(&dev->struct_mutex);
- return (0);
+ return 0;
}
-int
-drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
+/**
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
- drm_magic_entry_t *prev = NULL;
- drm_magic_entry_t *pt;
- int hash;
+ struct drm_magic_entry *prev = NULL;
+ struct drm_magic_entry *pt;
+ int hash;
+ struct drm_device *dev = master->minor->dev;
- DRM_DEBUG("drm_remove_magic : %d", magic);
+ DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
- DRM_LOCK();
- for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
+ mutex_lock(&dev->struct_mutex);
+ for (pt = master->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) {
- if (dev->magiclist[hash].head == pt) {
- dev->magiclist[hash].head = pt->next;
+ if (master->magiclist[hash].head == pt) {
+ master->magiclist[hash].head = pt->next;
}
- if (dev->magiclist[hash].tail == pt) {
- dev->magiclist[hash].tail = prev;
+ if (master->magiclist[hash].tail == pt) {
+ master->magiclist[hash].tail = prev;
}
if (prev) {
prev->next = pt->next;
}
- DRM_UNLOCK();
+ mutex_unlock(&dev->struct_mutex);
drm_free(pt, sizeof (*pt), DRM_MEM_MAGIC);
- return (0);
+ return 0;
}
}
- DRM_UNLOCK();
+ mutex_unlock(&dev->struct_mutex);
- return (EINVAL);
+ return -EINVAL;
}
-/*ARGSUSED*/
-int
-drm_getmagic(DRM_IOCTL_ARGS)
+/**
+ * Get a unique magic number (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a resulting drm_auth structure.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * searches for a unique non-zero magic number and adds it, associating it
+ * with \p file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
+ */
+/* LINTED */
+int drm_getmagic(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
static drm_magic_t sequence = 0;
- drm_auth_t auth;
+ struct drm_auth *auth = data;
/* Find unique magic */
- if (fpriv->magic) {
- auth.magic = fpriv->magic;
+ if (file->magic) {
+ auth->magic = file->magic;
} else {
do {
int old = sequence;
- auth.magic = old+1;
- if (!atomic_cmpset_int(&sequence, old, auth.magic))
+ auth->magic = old + 1;
+ if (!atomic_cmpset_int(&sequence, old, auth->magic))
continue;
- } while (drm_find_file(dev, auth.magic));
- fpriv->magic = auth.magic;
- (void) drm_add_magic(dev, fpriv, auth.magic);
+ } while (drm_find_file(file->master, auth->magic));
+ file->magic = auth->magic;
+ (void) drm_add_magic(file->master, file, auth->magic);
}
+ DRM_DEBUG("%u\n", auth->magic);
- DRM_DEBUG("drm_getmagic: %u", auth.magic);
-
- DRM_COPYTO_WITH_RETURN((void *)data, &auth, sizeof (auth));
-
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_authmagic(DRM_IOCTL_ARGS)
+/**
+ * Authenticate with a magic.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_auth structure.
+ * \return zero if authentication succeeded, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \p arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
+ */
+/* LINTED */
+int drm_authmagic(DRM_IOCTL_ARGS)
{
- drm_auth_t auth;
- drm_file_t *file;
- DRM_DEVICE;
-
- DRM_COPYFROM_WITH_RETURN(&auth, (void *)data, sizeof (auth));
-
- if ((file = drm_find_file(dev, auth.magic))) {
- file->authenticated = 1;
- (void) drm_remove_magic(dev, auth.magic);
- return (0);
+ struct drm_auth *auth = data;
+ struct drm_file *file_priv;
+
+ DRM_DEBUG("%u\n", auth->magic);
+ if ((file_priv = drm_find_file(file->master, auth->magic))) {
+ file_priv->authenticated = 1;
+ (void) drm_remove_magic(file->master, auth->magic);
+ return 0;
}
- return (EINVAL);
+ return -EINVAL;
}
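
For context, the user-space half of this magic-cookie handshake looks roughly like the sketch below. It assumes the standard <drm/drm.h> layout of struct drm_auth and the DRM_IOCTL_GET_MAGIC / DRM_IOCTL_AUTH_MAGIC requests, plus a hypothetical /dev/dri/card0 node; in practice the authenticating descriptor is the master held by the display server, not a second open in the same process.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_auth, DRM_IOCTL_GET_MAGIC, DRM_IOCTL_AUTH_MAGIC */

int main(void)
{
	int client = open("/dev/dri/card0", O_RDWR);	/* unauthenticated client */
	int master = open("/dev/dri/card0", O_RDWR);	/* stand-in for the DRM master */
	struct drm_auth auth = { 0 };

	if (client < 0 || master < 0)
		return 1;

	/* client side: drm_getmagic() hands back a fresh cookie */
	if (ioctl(client, DRM_IOCTL_GET_MAGIC, &auth) != 0)
		return 1;

	/* master side: drm_authmagic() marks the matching drm_file as authenticated */
	if (ioctl(master, DRM_IOCTL_AUTH_MAGIC, &auth) != 0)
		return 1;

	printf("authenticated with magic %u\n", auth.magic);
	return 0;
}
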
diff --git a/usr/src/uts/common/io/drm/drm_bufs.c b/usr/src/uts/common/io/drm/drm_bufs.c
index ec01d37..2023f1d 100644
--- a/usr/src/uts/common/io/drm/drm_bufs.c
+++ b/usr/src/uts/common/io/drm/drm_bufs.c
@@ -1,11 +1,21 @@
/*
- * drm_bufs.h -- Generic buffer template -*- linux-c -*-
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
+
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2012, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,323 +36,465 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
*/
#include "drmP.h"
-#include <gfx_private.h>
#include "drm_io32.h"
+#ifdef _LP64
+extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
+#define drm_smmap smmap64
+#elif defined(_SYSCALL32_IMPL) || defined(_ILP32)
+extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
+#define drm_smmap smmap32
+#else
+#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
+#endif
-#define PAGE_MASK (PAGE_SIZE-1)
-#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define round_page(x) (((x) + (PAGE_SIZE - 1)) & PAGE_MASK)
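
The corrected macros behave as expected; a throwaway user-space check, assuming a 4 KB page purely for illustration:

#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed for illustration; the kernel provides this */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define round_page(x)	(((x) + (PAGE_SIZE - 1)) & PAGE_MASK)

int main(void)
{
	printf("%lu %lu %lu\n",
	    round_page(1UL), round_page(4096UL), round_page(4097UL));
	/* prints: 4096 4096 8192 */
	return 0;
}
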
-/*
- * Compute order. Can be made faster.
- */
-int
-drm_order(unsigned long size)
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+ struct drm_local_map *map)
{
- int order = 0;
- unsigned long tmp = size;
-
- while (tmp >>= 1)
- order ++;
-
- if (size & ~(1 << order))
- ++order;
+ struct drm_map_list *entry;
+ list_for_each_entry(entry, struct drm_map_list, &dev->maplist, head) {
+ /*
+ * Because the kernel-userspace ABI is fixed at a 32-bit offset
+ * while PCI resources may live above that, we only compare the
+ * low 32 bits of the offset for maps of type _DRM_FRAME_BUFFER
+ * or _DRM_REGISTERS. It is assumed that each driver will have
+ * only one resource of each type.
+ */
+ if (!entry->map ||
+ map->type != entry->map->type ||
+ entry->master != dev->primary->master)
+ continue;
+ switch (map->type) {
+ case _DRM_SHM:
+ if (map->flags != _DRM_CONTAINS_LOCK)
+ break;
+ return entry;
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+ if ((entry->map->offset & 0xffffffff) ==
+ (map->offset & 0xffffffff))
+ return entry;
+ default: /* Make gcc happy */
+ ;
+ }
+ if (entry->map->offset == map->offset)
+ return entry;
+ }
- return (order);
+ return NULL;
}
-static inline drm_local_map_t *
-drm_find_map(drm_device_t *dev, u_offset_t offset, int type)
+int drm_map_handle(struct drm_device *dev, struct drm_map_list *list)
{
- drm_local_map_t *map;
+ int newid, ret;
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if ((map->type == type) && ((map->offset == offset) ||
- (map->flags == _DRM_CONTAINS_LOCK) &&
- (map->type == _DRM_SHM)))
- return (map);
- }
+ ret = idr_get_new_above(&dev->map_idr, list, 1, &newid);
+ if (ret < 0)
+ return ret;
- return (NULL);
+ list->user_token = newid << PAGE_SHIFT;
+ return 0;
}
-int drm_addmap(drm_device_t *dev, unsigned long offset,
- unsigned long size, drm_map_type_t type,
- drm_map_flags_t flags, drm_local_map_t **map_ptr)
+/**
+ * Core function to create a range of memory available for mapping by a
+ * non-root process.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device *dev, unsigned long offset,
+ unsigned long size, enum drm_map_type type,
+ enum drm_map_flags flags,
+ struct drm_map_list ** maplist)
{
- drm_local_map_t *map;
- caddr_t kva;
- int retval;
-
- /*
- * Only allow shared memory to be removable since we only keep
- * enough book keeping information about shared memory to allow
- * for removal when processes fork.
- */
- if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM)
- return (EINVAL);
- if ((offset & PAGE_MASK) || (size & PAGE_MASK))
- return (EINVAL);
- if (offset + size < offset)
- return (EINVAL);
-
- /*
- * Check if this is just another version of a kernel-allocated
- * map, and just hand that back if so.
- */
- map = drm_find_map(dev, offset, type);
- if (map != NULL) {
- goto done;
- }
+ struct drm_local_map *map;
+ struct drm_map_list *list;
+ /* LINTED */
+ unsigned long user_token;
+ int ret;
- /*
- * Allocate a new map structure, fill it in, and do any
- * type-specific initialization necessary.
- */
- map = drm_alloc(sizeof (*map), DRM_MEM_MAPS);
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
- return (ENOMEM);
+ return -ENOMEM;
map->offset = offset;
map->size = size;
- map->type = type;
map->flags = flags;
+ map->type = type;
+
+ /* Only allow shared memory to be removable since we only keep enough
+ * book keeping information about shared memory to allow for removal
+ * when processes fork.
+ */
+ if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+ kfree(map, sizeof(*map));
+ return -EINVAL;
+ }
+ DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
+ (unsigned long long)map->offset, map->size, map->type);
+
+ /* page-align _DRM_SHM maps. They are allocated here so there is no security
+ * hole created by that and it works around various broken drivers that use
+ * a non-aligned quantity to map the SAREA. --BenH
+ */
+ if (map->type == _DRM_SHM)
+ map->size = PAGE_ALIGN(map->size);
+
+ if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+ kfree(map, sizeof(*map));
+ return -EINVAL;
+ }
+
+ if (map->offset + map->size < map->offset) {
+ kfree(map, sizeof(*map));
+ return -EINVAL;
+ }
+
+ map->mtrr = -1;
+ map->handle = NULL;
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
- retval = drm_ioremap(dev, map);
- if (retval)
- return (retval);
- break;
+ /* Some drivers preinitialize some maps, without the X Server
+ * needing to be aware of it. Therefore, we just return success
+ * when the server tries to create a duplicate map.
+ */
+ list = drm_find_matching_map(dev, map);
+ if (list != NULL) {
+ if (list->map->size != map->size) {
+ DRM_DEBUG("Matching maps of type %d with "
+ "mismatched sizes, (%ld vs %ld)\n",
+ map->type, map->size,
+ list->map->size);
+ list->map->size = map->size;
+ }
+ kfree(map, sizeof(struct drm_local_map));
+ *maplist = list;
+ return 0;
+ }
+
+ if (map->type == _DRM_REGISTERS) {
+ map->handle = ioremap(map->offset, map->size);
+ if (!map->handle) {
+ kfree(map, sizeof(struct drm_local_map));
+ return -ENOMEM;
+ }
+ }
+
+ break;
case _DRM_SHM:
- /*
- * ddi_umem_alloc() grants page-aligned memory. We needn't
- * handle alignment issue here.
- */
- map->handle = ddi_umem_alloc(map->size,
- DDI_UMEM_NOSLEEP, &map->drm_umem_cookie);
+ list = drm_find_matching_map(dev, map);
+ if (list != NULL) {
+ if(list->map->size != map->size) {
+ DRM_DEBUG("Matching maps of type %d with "
+ "mismatched sizes, (%ld vs %ld)\n",
+ map->type, map->size, list->map->size);
+ list->map->size = map->size;
+ }
+
+ kfree(map, sizeof(struct drm_local_map));
+ *maplist = list;
+ return 0;
+ }
+ map->handle = ddi_umem_alloc(map->size, DDI_UMEM_NOSLEEP, &map->umem_cookie);
+ DRM_DEBUG("%lu %p\n",
+ map->size, map->handle);
if (!map->handle) {
- DRM_ERROR("drm_addmap: ddi_umem_alloc failed");
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- return (ENOMEM);
+ kfree(map, sizeof(struct drm_local_map));
+ return -ENOMEM;
}
- /*
- * record only low 32-bit of this handle, since 32-bit
- * user app is incapable of passing in 64bit offset when
- * doing mmap.
- */
map->offset = (uintptr_t)map->handle;
- map->offset &= 0xffffffffUL;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->lock.hw_lock != NULL) {
- ddi_umem_free(map->drm_umem_cookie);
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- return (EBUSY);
+ if (dev->primary->master->lock.hw_lock != NULL) {
+ ddi_umem_free(map->umem_cookie);
+ kfree(map, sizeof(struct drm_local_map));
+ return -EBUSY;
}
- dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
- map->dev_addr = map->handle;
+ break;
+ case _DRM_AGP: {
+ caddr_t kvaddr;
+
+ if (!drm_core_has_AGP(dev)) {
+ kfree(map, sizeof(struct drm_local_map));
+ return -EINVAL;
+ }
+
+ map->offset += dev->agp->base;
+ kvaddr = gfxp_alloc_kernel_space(map->size);
+ if (!kvaddr) {
+ DRM_ERROR("failed to alloc AGP aperture");
+ kfree(map, sizeof(struct drm_local_map));
+ return -EPERM;
+ }
+ gfxp_load_kernel_space(map->offset, map->size,
+ GFXP_MEMORY_WRITECOMBINED, kvaddr);
+ map->handle = (void *)(uintptr_t)kvaddr;
+ map->umem_cookie = gfxp_umem_cookie_init(map->handle, map->size);
+ if (!map->umem_cookie) {
+ DRM_ERROR("gfxp_umem_cookie_init() failed");
+ gfxp_unmap_kernel_space(map->handle, map->size);
+ kfree(map, sizeof(struct drm_local_map));
+ return (-ENOMEM);
+ }
+ break;
+ }
+ case _DRM_GEM:
+ DRM_ERROR("tried to addmap GEM object\n");
break;
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- return (EINVAL);
+ kfree(map, sizeof(struct drm_local_map));
+ return -EINVAL;
}
map->offset += (uintptr_t)dev->sg->virtual;
- map->handle = (void *)(uintptr_t)map->offset;
- map->dev_addr = dev->sg->virtual;
- map->dev_handle = dev->sg->dmah_sg->acc_hdl;
+ map->handle = (void *)map->offset;
+ map->umem_cookie = gfxp_umem_cookie_init(map->handle, map->size);
+ if (!map->umem_cookie) {
+ DRM_ERROR("gfxp_umem_cookie_init() failed");
+ kfree(map, sizeof(struct drm_local_map));
+ return (-ENOMEM);
+ }
break;
-
case _DRM_CONSISTENT:
DRM_ERROR("%d DRM_AGP_CONSISTENT", __LINE__);
- return (ENOTSUP);
- case _DRM_AGP:
- map->offset += dev->agp->base;
- kva = gfxp_map_kernel_space(map->offset, map->size,
- GFXP_MEMORY_WRITECOMBINED);
- if (kva == 0) {
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- cmn_err(CE_WARN,
- "drm_addmap: failed to map AGP aperture");
- return (ENOMEM);
- }
- map->handle = (void *)(uintptr_t)kva;
- map->dev_addr = kva;
- break;
+ kfree(map, sizeof(struct drm_local_map));
+ return -ENOTSUP;
default:
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
- return (EINVAL);
+ kfree(map, sizeof(struct drm_local_map));
+ return -EINVAL;
}
- TAILQ_INSERT_TAIL(&dev->maplist, map, link);
+ list = kmalloc(sizeof(*list), GFP_KERNEL);
+ if (!list) {
+ if (map->type == _DRM_REGISTERS)
+ iounmap(map->handle);
+ kfree(map, sizeof(struct drm_local_map));
+ return -EINVAL;
+ }
+ (void) memset(list, 0, sizeof(*list));
+ list->map = map;
+
+ mutex_lock(&dev->struct_mutex);
+ list_add(&list->head, &dev->maplist, (caddr_t)list);
+
+ /* Assign a 32-bit handle */
+ /* We do it here so that dev->struct_mutex protects the increment */
+ user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+ map->offset;
+ ret = drm_map_handle(dev, list);
+ if (ret) {
+ if (map->type == _DRM_REGISTERS)
+ iounmap(map->handle);
+ kfree(map, sizeof(struct drm_local_map));
+ kfree(list, sizeof(struct drm_map_list));
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
-done:
- /* Jumped to, with lock held, when a kernel map is found. */
- *map_ptr = map;
+ mutex_unlock(&dev->struct_mutex);
- return (0);
+ if (!(map->flags & _DRM_DRIVER))
+ list->master = dev->primary->master;
+ *maplist = list;
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_addmap_ioctl(DRM_IOCTL_ARGS)
+int drm_addmap(struct drm_device *dev, unsigned long offset,
+ unsigned long size, enum drm_map_type type,
+ enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
- drm_map_t request;
- drm_local_map_t *map;
+ struct drm_map_list *list;
+ int rc;
+
+ rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+ if (!rc)
+ *map_ptr = list->map;
+ return rc;
+}
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a
+ * non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ */
+/* LINTED */
+int drm_addmap_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_map *map = data;
+ struct drm_map_list *maplist;
int err;
- DRM_DEVICE;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void *)data, sizeof (request32));
- request.offset = request32.offset;
- request.size = request32.size;
- request.type = request32.type;
- request.flags = request32.flags;
- request.mtrr = request32.mtrr;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
-
- err = drm_addmap(dev, request.offset, request.size, request.type,
- request.flags, &map);
-
- if (err != 0)
- return (err);
-
- request.offset = map->offset;
- request.size = map->size;
- request.type = map->type;
- request.flags = map->flags;
- request.mtrr = map->mtrr;
- request.handle = (uintptr_t)map->handle;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t request32;
- request32.offset = request.offset;
- request32.size = (uint32_t)request.size;
- request32.type = request.type;
- request32.flags = request.flags;
- request32.handle = request.handle;
- request32.mtrr = request.mtrr;
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request32, sizeof (request32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request, sizeof (request));
- return (0);
+ if (!(DRM_SUSER(credp) || map->type == _DRM_AGP || map->type == _DRM_SHM))
+ return -EPERM;
+
+ err = drm_addmap_core(dev, map->offset, map->size, map->type,
+ map->flags, &maplist);
+
+ if (err)
+ return err;
+
+ /* avoid a warning on 64-bit; this cast isn't very nice, but the API is set, so it's too late to change it */
+ map->handle = (void *)(uintptr_t)maplist->user_token;
+ return 0;
}
-void
-drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+/**
+ * Remove a map private from list and deallocate resources if the mapping
+ * isn't in use.
+ *
+ * Searches for the map on drm_device::maplist, removes it from the list, sees
+ * if it is being used, and frees any associated resources (such as MTRRs) if
+ * it is not in use.
+ *
+ * \sa drm_addmap
+ */
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
- DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+ struct drm_map_list *r_list = NULL, *list_t;
+ /* LINTED */
+ drm_dma_handle_t dmah;
+ int found = 0;
+ /* LINTED */
+ struct drm_master *master;
+
+ /* Find the list entry for the map and remove it */
+ list_for_each_entry_safe(r_list, list_t, struct drm_map_list, &dev->maplist, head) {
+ if (r_list->map == map) {
+ master = r_list->master;
+ list_del(&r_list->head);
+ (void) idr_remove(&dev->map_idr,
+ r_list->user_token >> PAGE_SHIFT);
+ kfree(r_list, sizeof(struct drm_map_list));
+ found = 1;
+ break;
+ }
+ }
- TAILQ_REMOVE(&dev->maplist, map, link);
+ if (!found)
+ return -EINVAL;
switch (map->type) {
case _DRM_REGISTERS:
- drm_ioremapfree(map);
- break;
+ iounmap(map->handle);
/* FALLTHROUGH */
case _DRM_FRAME_BUFFER:
- drm_ioremapfree(map);
break;
case _DRM_SHM:
- ddi_umem_free(map->drm_umem_cookie);
+ ddi_umem_free(map->umem_cookie);
break;
case _DRM_AGP:
- /*
- * we mapped AGP aperture into kernel space in drm_addmap,
- * here, unmap them and release kernel virtual address space
- */
- gfxp_unmap_kernel_space(map->dev_addr, map->size);
+ gfxp_umem_cookie_destroy(map->umem_cookie);
+ gfxp_unmap_kernel_space(map->handle, map->size);
break;
-
case _DRM_SCATTER_GATHER:
+ gfxp_umem_cookie_destroy(map->umem_cookie);
break;
case _DRM_CONSISTENT:
break;
- default:
+ case _DRM_GEM:
+ DRM_ERROR("tried to rmmap GEM object\n");
break;
}
+ kfree(map, sizeof(struct drm_local_map));
- drm_free(map, sizeof (*map), DRM_MEM_MAPS);
+ return 0;
}
-/*
- * Remove a map private from list and deallocate resources if the
- * mapping isn't in use.
- */
-/*ARGSUSED*/
-int
-drm_rmmap_ioctl(DRM_IOCTL_ARGS)
+int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
- DRM_DEVICE;
- drm_local_map_t *map;
- drm_map_t request;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void *)data, sizeof (drm_map_32_t));
- request.offset = request32.offset;
- request.size = request32.size;
- request.type = request32.type;
- request.flags = request32.flags;
- request.handle = request32.handle;
- request.mtrr = request32.mtrr;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_rmmap_locked(dev, map);
+ mutex_unlock(&dev->struct_mutex);
- DRM_LOCK();
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if (((uintptr_t)map->handle == (request.handle & 0xffffffff)) &&
- (map->flags & _DRM_REMOVABLE))
+ return ret;
+}
+
+/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly. Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about. This seems
+ * unlikely.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ */
+/* LINTED */
+int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_map *request = data;
+ struct drm_local_map *map = NULL;
+ struct drm_map_list *r_list;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(r_list, struct drm_map_list, &dev->maplist, head) {
+ if (r_list->map &&
+ r_list->user_token == (unsigned long)request->handle &&
+ r_list->map->flags & _DRM_REMOVABLE) {
+ map = r_list->map;
break;
+ }
+ }
+
+ /* List has wrapped around to the head pointer, or it's empty and we
+ * didn't find anything.
+ */
+ if (list_empty(&dev->maplist) || !map) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
- /* No match found. */
- if (map == NULL) {
- DRM_UNLOCK();
- return (EINVAL);
+ /* Register and framebuffer maps are permanent */
+ if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
}
- drm_rmmap(dev, map);
- DRM_UNLOCK();
+ ret = drm_rmmap_locked(dev, map);
- return (0);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
-/*ARGSUSED*/
-static void
-drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+/* LINTED */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+ struct drm_buf_entry * entry)
{
int i;
@@ -353,51 +505,41 @@ drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
"drm_cleanup_buf_error: not implemented");
}
}
- drm_free(entry->seglist,
- entry->seg_count *
- sizeof (*entry->seglist), DRM_MEM_SEGS);
+ kfree(entry->seglist, entry->seg_count * sizeof (*entry->seglist));
+
entry->seg_count = 0;
}
if (entry->buf_count) {
for (i = 0; i < entry->buf_count; i++) {
if (entry->buflist[i].dev_private) {
- drm_free(entry->buflist[i].dev_private,
- entry->buflist[i].dev_priv_size,
- DRM_MEM_BUFS);
+ kfree(entry->buflist[i].dev_private,
+ entry->buflist[i].dev_priv_size);
}
}
- drm_free(entry->buflist,
- entry->buf_count *
- sizeof (*entry->buflist), DRM_MEM_BUFS);
- entry->buflist = NULL;
+ kfree(entry->buflist, entry->buf_count * sizeof (*entry->buflist));
+
entry->buf_count = 0;
}
}
-/*ARGSUSED*/
-int
-drm_markbufs(DRM_IOCTL_ARGS)
-{
- DRM_DEBUG("drm_markbufs");
- return (EINVAL);
-}
-
-/*ARGSUSED*/
-int
-drm_infobufs(DRM_IOCTL_ARGS)
-{
- DRM_DEBUG("drm_infobufs");
- return (EINVAL);
-}
-
-static int
-drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks, creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+/* LINTED */
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request, cred_t *credp)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_buf_t **temp_buflist;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -405,79 +547,117 @@ drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
int size;
int alignment;
int page_order;
+ int total;
int byte_count;
int i;
+ struct drm_buf **temp_buflist;
if (!dma)
- return (EINVAL);
+ return -EINVAL;
count = request->count;
order = drm_order(request->size);
size = 1 << order;
- alignment = (request->flags & _DRM_PAGE_ALIGN)
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request->agp_start;
- entry = &dma->bufs[order];
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
- /* No more than one allocation per order */
+ mutex_lock(&dev->struct_mutex);
+ entry = &dma->bufs[order];
if (entry->buf_count) {
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
}
- entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
- DRM_MEM_BUFS);
+ if (count < 0 || count > 4096) {
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -EINVAL;
+ }
+
+ entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
+ (void) memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
- buf->offset = (dma->byte_count + offset);
+ buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->pending = 0;
- buf->filp = NULL;
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->buf_priv_size;
- buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
- if (buf->dev_private == NULL) {
+ buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
+ (void) memset(buf->dev_private, 0, buf->dev_priv_size);
+
+ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
- temp_buflist = drm_alloc(
- (dma->buf_count + entry->buf_count) * sizeof (*dma->buflist),
- DRM_MEM_BUFS);
+ DRM_DEBUG("byte_count: %d\n", byte_count);
- if (temp_buflist == NULL) {
+ temp_buflist = kmalloc(
+ (dma->buf_count + entry->buf_count) *
+ sizeof(*dma->buflist), GFP_KERNEL);
+ if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- DRM_ERROR(" temp_buflist is NULL");
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
-
bcopy(temp_buflist, dma->buflist,
dma->buf_count * sizeof (*dma->buflist));
kmem_free(dma->buflist, dma->buf_count *sizeof (*dma->buflist));
@@ -488,24 +668,29 @@ drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
}
dma->buf_count += entry->buf_count;
- dma->byte_count += byte_count;
dma->seg_count += entry->seg_count;
dma->page_count += byte_count >> PAGE_SHIFT;
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+ mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
dma->flags = _DRM_DMA_USE_AGP;
- return (0);
+ atomic_dec(&dev->buf_alloc);
+ return 0;
}
-static int
-drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request, cred_t *credp)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -513,26 +698,73 @@ drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
int size;
int alignment;
int page_order;
+ int total;
int byte_count;
int i;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
+
+ if (!drm_core_check_feature(dev, DRIVER_SG))
+ return -EINVAL;
+
+ if (!dma)
+ return -EINVAL;
+
+ if (!DRM_SUSER(credp))
+ return -EPERM;
count = request->count;
order = drm_order(request->size);
size = 1 << order;
- alignment = (request->flags & _DRM_PAGE_ALIGN)
+ alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = request->agp_start;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ if (count < 0 || count > 4096) {
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -EINVAL;
+ }
- entry->buflist = drm_alloc(count * sizeof (*entry->buflist),
- DRM_MEM_BUFS);
- if (entry->buflist == NULL)
- return (ENOMEM);
+ entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+ GFP_KERNEL);
+ if (!entry->buflist) {
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ (void) memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -540,41 +772,51 @@ drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
offset = 0;
while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
- buf->offset = (dma->byte_count + offset);
+ buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->sg->handle);
- buf->next = NULL;
- buf->pending = 0;
- buf->filp = NULL;
+ buf->next = NULL;
+ buf->pending = 0;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->buf_priv_size;
- buf->dev_private = drm_alloc(buf->dev_priv_size,
- DRM_MEM_BUFS);
- if (buf->dev_private == NULL) {
+ buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
+ (void) memset(buf->dev_private, 0, buf->dev_priv_size);
+
+ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
temp_buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof (*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof (*dma->buflist), DRM_MEM_BUFS);
if (!temp_buflist) {
+ /* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- return (ENOMEM);
+ mutex_unlock(&dev->struct_mutex);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
}
dma->buflist = temp_buflist;
@@ -584,314 +826,386 @@ drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
+
+ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+ mutex_unlock(&dev->struct_mutex);
+
request->count = entry->buf_count;
request->size = size;
+
dma->flags = _DRM_DMA_USE_SG;
- return (0);
+ atomic_dec(&dev->buf_alloc);
+ return 0;
}
-int
-drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * According to the memory type specified in drm_buf_desc::flags, it
+ * dispatches the call either to drm_addbufs_agp() or drm_addbufs_sg() for
+ * AGP or scatter-gather memory respectively.
+ */
+/* LINTED */
+int drm_addbufs(DRM_IOCTL_ARGS)
{
- int order, ret;
-
- DRM_SPINLOCK(&dev->dma_lock);
-
- if (request->count < 0 || request->count > 4096) {
- DRM_SPINLOCK(&dev->dma_lock);
- return (EINVAL);
- }
+ struct drm_buf_desc *request = data;
+ int ret = -EINVAL;
- order = drm_order(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
- DRM_SPINLOCK(&dev->dma_lock);
- return (EINVAL);
- }
-
- /* No more allocations after first buffer-using ioctl. */
- if (dev->buf_use != 0) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (EBUSY);
- }
- /* No more than one allocation per order */
- if (dev->dma->bufs[order].buf_count != 0) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (ENOMEM);
- }
-
- ret = drm_do_addbufs_agp(dev, request);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
- DRM_SPINUNLOCK(&dev->dma_lock);
+ if (request->flags & _DRM_AGP_BUFFER)
+ ret = drm_addbufs_agp(dev, request, credp);
+ else
+ if (request->flags & _DRM_SG_BUFFER)
+ ret = drm_addbufs_sg(dev, request, credp);
- return (ret);
+ return ret;
}
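
From user space, which path the dispatch above takes is controlled entirely by drm_buf_desc::flags. A hedged sketch, assuming the standard <drm/drm.h> DRM_IOCTL_ADD_BUFS request and an already-authenticated, AGP-capable descriptor (fd is not opened here):

#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_buf_desc, DRM_IOCTL_ADD_BUFS */

/* Ask for 32 page-aligned 64 KB AGP buffers; returns how many were created. */
int add_agp_buffers(int fd, unsigned long agp_start)
{
	struct drm_buf_desc desc = { 0 };

	desc.count = 32;
	desc.size = 64 * 1024;
	desc.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;	/* routes to drm_addbufs_agp() */
	desc.agp_start = agp_start;	/* offset into the AGP aperture */

	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) != 0)
		return -1;
	return desc.count;	/* written back by the driver */
}
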
-int
-drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
+/**
+ * Get information about the buffer mappings.
+ *
+ * This was originally meant for debugging purposes, or for use by a
+ * sophisticated client library to determine how best to use the available
+ * buffers (e.g., large buffers can be used for image transfer).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * lock, preventing allocation of more buffers after this call. Information
+ * about each requested buffer is then copied into user space.
+ */
+/* LINTED */
+int drm_infobufs(DRM_IOCTL_ARGS)
{
- int order, ret;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_info *request = data;
+ int i;
+ int count;
- DRM_SPINLOCK(&dev->dma_lock);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
- if (request->count < 0 || request->count > 4096) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (EINVAL);
- }
+ if (!dma)
+ return -EINVAL;
- order = drm_order(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (EINVAL);
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
}
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
- /* No more allocations after first buffer-using ioctl. */
- if (dev->buf_use != 0) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (EBUSY);
+ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+ if (dma->bufs[i].buf_count)
+ ++count;
}
- /* No more than one allocation per order */
- if (dev->dma->bufs[order].buf_count != 0) {
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (ENOMEM);
+ DRM_DEBUG("count = %d\n", count);
+
+ if (request->count >= count) {
+ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+ if (dma->bufs[i].buf_count) {
+ struct drm_buf_desc __user *to =
+ &request->list[count];
+ struct drm_buf_entry *from = &dma->bufs[i];
+ struct drm_freelist *list = &dma->bufs[i].freelist;
+ if (copy_to_user(&to->count,
+ &from->buf_count,
+ sizeof(from->buf_count)) ||
+ copy_to_user(&to->size,
+ &from->buf_size,
+ sizeof(from->buf_size)) ||
+ copy_to_user(&to->low_mark,
+ &list->low_mark,
+ sizeof(list->low_mark)) ||
+ copy_to_user(&to->high_mark,
+ &list->high_mark,
+ sizeof(list->high_mark)))
+ return -EFAULT;
+
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark);
+ ++count;
+ }
+ }
}
+ request->count = count;
- ret = drm_do_addbufs_sg(dev, request);
- DRM_SPINUNLOCK(&dev->dma_lock);
- return (ret);
+ return 0;
}
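
The write-back of request->count when the caller's array is too small implies the usual two-pass query from user space: first learn how many size orders are populated, then fetch one drm_buf_desc per order. A sketch assuming the standard <drm/drm.h> DRM_IOCTL_INFO_BUFS request:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_buf_info, struct drm_buf_desc, DRM_IOCTL_INFO_BUFS */

/* fd is an open, authenticated DRM descriptor; returns the number of entries. */
int query_buf_info(int fd)
{
	struct drm_buf_info info = { 0 };

	/* pass 1: count == 0, so the driver only reports how many entries exist */
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) != 0)
		return -1;
	if (info.count == 0)
		return 0;

	info.list = calloc(info.count, sizeof (*info.list));
	if (info.list == NULL)
		return -1;

	/* pass 2: the driver copies one drm_buf_desc per populated size order */
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info) != 0) {
		free(info.list);
		return -1;
	}
	free(info.list);
	return info.count;
}
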
-/*ARGSUSED*/
-int
-drm_addbufs_ioctl(DRM_IOCTL_ARGS)
+/**
+ * Specifies a low and high water mark for buffer allocation.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order is bounded between the admissible orders and
+ * updates the respective drm_device_dma::bufs entry low and high water mark.
+ *
+ * \note This ioctl is deprecated and mostly never used.
+ */
+/* LINTED */
+int drm_markbufs(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_buf_desc_t request;
- int err;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_desc *request = data;
+ int order;
+ struct drm_buf_entry *entry;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_buf_desc_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void *)data, sizeof (request32));
- request.count = request32.count;
- request.size = request32.size;
- request.low_mark = request32.low_mark;
- request.high_mark = request32.high_mark;
- request.flags = request32.flags;
- request.agp_start = request32.agp_start;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
-
- if (request.flags & _DRM_AGP_BUFFER)
- err = drm_addbufs_agp(dev, &request);
- else if (request.flags & _DRM_SG_BUFFER)
- err = drm_addbufs_sg(dev, &request);
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_buf_desc_32_t request32;
- request32.count = request.count;
- request32.size = request.size;
- request32.low_mark = request.low_mark;
- request32.high_mark = request.high_mark;
- request32.flags = request.flags;
- request32.agp_start = (uint32_t)request.agp_start;
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request32, sizeof (request32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request, sizeof (request));
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
+
+ if (!dma)
+ return -EINVAL;
- return (err);
+ DRM_DEBUG("%d, %d, %d\n",
+ request->size, request->low_mark, request->high_mark);
+ order = drm_order(request->size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+ return -EINVAL;
+ entry = &dma->bufs[order];
+
+ if (request->low_mark < 0 || request->low_mark > entry->buf_count)
+ return -EINVAL;
+ if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+ return -EINVAL;
+
+ entry->freelist.low_mark = request->low_mark;
+ entry->freelist.high_mark = request->high_mark;
+
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_freebufs(DRM_IOCTL_ARGS)
+/**
+ * Unreserve the buffers in list, previously reserved using drmDMA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_free structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+/* LINTED */
+int drm_freebufs(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_free_t request;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_free *request = data;
int i;
int idx;
- drm_buf_t *buf;
- int retcode = 0;
+ struct drm_buf *buf;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_buf_free_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void*)data, sizeof (request32));
- request.count = request32.count;
- request.list = (int *)(uintptr_t)request32.list;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
- for (i = 0; i < request.count; i++) {
- if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof (idx))) {
- retcode = EFAULT;
- break;
- }
+ if (!dma)
+ return -EINVAL;
+
+ DRM_DEBUG("%d\n", request->count);
+ for (i = 0; i < request->count; i++) {
+ if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof (idx)))
+ return -EFAULT;
if (idx < 0 || idx >= dma->buf_count) {
- DRM_ERROR("drm_freebufs: Index %d (of %d max)\n",
- idx, dma->buf_count - 1);
- retcode = EINVAL;
- break;
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ return -EINVAL;
}
buf = dma->buflist[idx];
- if (buf->filp != fpriv) {
- DRM_ERROR(
- "drm_freebufs: process %d not owning the buffer.\n",
+ if (buf->file_priv != file) {
+ DRM_ERROR("Process %d freeing buffer not owned\n",
DRM_CURRENTPID);
- retcode = EINVAL;
- break;
+ return -EINVAL;
}
drm_free_buffer(dev, buf);
}
- return (retcode);
+ return 0;
}
-#ifdef _LP64
-extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
-#define drm_smmap smmap64
-#else
-#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
-extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
-#define drm_smmap smmap32
-#else
-#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
-#endif
-#endif
-
-
-/*ARGSUSED*/
-int
-drm_mapbufs(DRM_IOCTL_ARGS)
+/**
+ * Maps all of the DMA buffers into client-virtual space (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
+ * drm_mmap_dma().
+ */
+/* LINTED */
+int drm_mapbufs(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_buf_map_t request;
+ struct drm_device_dma *dma = dev->dma;
+ int retcode = 0;
const int zero = 0;
- unsigned long vaddr;
+ unsigned long virtual;
unsigned long address;
- drm_device_dma_t *dma = dev->dma;
- uint_t size;
- uint_t foff;
- int ret_tmp;
- int i;
-
-#ifdef _MULTI_DATAMODEL
- drm_buf_map_32_t request32;
- drm_buf_pub_32_t *list32;
- uint_t address32;
-
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- DRM_COPYFROM_WITH_RETURN(&request32,
- (void *)data, sizeof (request32));
- request.count = request32.count;
- request.virtual = (void *)(uintptr_t)request32.virtual;
- request.list = (drm_buf_pub_t *)(uintptr_t)request32.list;
- request.fd = request32.fd;
- } else
+ struct drm_buf_map *request = data;
+ int i;
+ uint_t size, foff;
+
+#ifdef _MULTI_DATAMODEL
+ struct drm_buf_pub_32 *list32;
+ uint_t address32;
#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
-
- dev->buf_use++;
-
- if (request.count < dma->buf_count)
- goto done;
-
- if ((dev->driver->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
- (dev->driver->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
- drm_local_map_t *map = dev->agp_buffer_map;
- if (map == NULL)
- return (EINVAL);
- size = round_page(map->size);
- foff = (uintptr_t)map->handle;
- } else {
- size = round_page(dma->byte_count);
- foff = 0;
- }
- request.virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED, request.fd, foff);
- if (request.virtual == NULL) {
- DRM_ERROR("drm_mapbufs: request.virtual is NULL");
- return (EINVAL);
- }
-
- vaddr = (unsigned long) request.virtual;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- list32 = (drm_buf_pub_32_t *)(uintptr_t)request32.list;
- for (i = 0; i < dma->buf_count; i++) {
- if (DRM_COPY_TO_USER(&list32[i].idx,
- &dma->buflist[i]->idx, sizeof (list32[0].idx))) {
- return (EFAULT);
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ return -EINVAL;
+
+ if (!dma)
+ return -EINVAL;
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ dev->buf_use++; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ if (request->count >= dma->buf_count) {
+ if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+ || (drm_core_check_feature(dev, DRIVER_SG)
+ && (dma->flags & _DRM_DMA_USE_SG))) {
+ struct drm_local_map *map = dev->agp_buffer_map;
+
+ if (!map) {
+ retcode = -EINVAL;
+ goto done;
}
- if (DRM_COPY_TO_USER(&list32[i].total,
- &dma->buflist[i]->total,
- sizeof (list32[0].total))) {
- return (EFAULT);
+ size = round_page(map->size);
+ foff = (uintptr_t)map->handle;
+ } else {
+ size = round_page(dma->byte_count);
+ foff = 0;
+ }
+ request->virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, request->fd, foff);
+ if (request->virtual == NULL) {
+ DRM_ERROR("request->virtual is NULL");
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ virtual = (unsigned long) request->virtual;
+
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(ioctl_mode & FMODELS) == DDI_MODEL_ILP32) {
+ list32 = (drm_buf_pub_32_t *)(uintptr_t)request->list;
+ for (i = 0; i < dma->buf_count; i++) {
+ if (DRM_COPY_TO_USER(&list32[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof (list32[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&list32[i].total,
+ &dma->buflist[i]->total,
+ sizeof (list32[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&list32[i].used,
+ &zero, sizeof (zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address32 = virtual + dma->buflist[i]->offset; /* *** */
+ if (DRM_COPY_TO_USER(&list32[i].address,
+ &address32, sizeof (list32[0].address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
}
- if (DRM_COPY_TO_USER(&list32[i].used,
- &zero, sizeof (zero))) {
- return (EFAULT);
+ } else {
+#endif
+ for (i = 0; i < dma->buf_count; i++) {
+ if (DRM_COPY_TO_USER(&request->list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof (request->list[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&request->list[i].total,
+ &dma->buflist[i]->total,
+ sizeof (request->list[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
+ sizeof (zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset; /* *** */
+
+ if (DRM_COPY_TO_USER(&request->list[i].address,
+ &address, sizeof (address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
}
- address32 = vaddr + dma->buflist[i]->offset; /* *** */
- ret_tmp = DRM_COPY_TO_USER(&list32[i].address,
- &address32, sizeof (list32[0].address));
- if (ret_tmp)
- return (EFAULT);
+#ifdef _MULTI_DATAMODEL
}
- goto done;
- }
#endif
+ }
+ done:
+ request->count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
- ASSERT(ddi_model_convert_from(mode & FMODELS) != DDI_MODEL_ILP32);
- for (i = 0; i < dma->buf_count; i++) {
- if (DRM_COPY_TO_USER(&request.list[i].idx,
- &dma->buflist[i]->idx, sizeof (request.list[0].idx))) {
- return (EFAULT);
- }
- if (DRM_COPY_TO_USER(&request.list[i].total,
- &dma->buflist[i]->total, sizeof (request.list[0].total))) {
- return (EFAULT);
- }
- if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
- sizeof (zero))) {
- return (EFAULT);
- }
- address = vaddr + dma->buflist[i]->offset; /* *** */
+ return retcode;
+}
- ret_tmp = DRM_COPY_TO_USER(&request.list[i].address,
- &address, sizeof (address));
- if (ret_tmp) {
- return (EFAULT);
- }
- }
+/**
+ * Compute size order. Returns the exponent of the smallest power of two which
+ * is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order(unsigned long size)
+{
+ int order;
+ unsigned long tmp;
-done:
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- request32.count = dma->buf_count;
- request32.virtual = (caddr32_t)(uintptr_t)request.virtual;
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request32, sizeof (request32));
- } else {
-#endif
- request.count = dma->buf_count;
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request, sizeof (request));
-#ifdef _MULTI_DATAMODEL
- }
-#endif
- return (0);
+ for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+
+ if (size & (size - 1))
+ ++order;
+
+ return order;
}
+
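
A few worked values make the behaviour of drm_order() concrete: the loop computes the floor of log2(size), and the final check bumps the result when size is not already a power of two. A stand-alone copy of the same algorithm:

#include <stdio.h>

/* Same algorithm as drm_order() above, lifted into a user-space program. */
static int order_of(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;
	if (size & (size - 1))
		++order;
	return order;
}

int main(void)
{
	printf("%d %d %d %d\n",
	    order_of(1), order_of(4096), order_of(4097), order_of(65536));
	/* prints: 0 12 13 16 */
	return 0;
}
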
diff --git a/usr/src/uts/common/io/drm/drm_cache.c b/usr/src/uts/common/io/drm/drm_cache.c
index fe7eff0..1f58fee 100644
--- a/usr/src/uts/common/io/drm/drm_cache.c
+++ b/usr/src/uts/common/io/drm/drm_cache.c
@@ -1,7 +1,10 @@
/*
- *
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
* Copyright(c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2012, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,10 +32,6 @@
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- */
-
#include <sys/x86_archext.h>
#include <vm/seg_kmem.h>
#include "drmP.h"
diff --git a/usr/src/uts/common/io/drm/drm_context.c b/usr/src/uts/common/io/drm/drm_context.c
index 16c141f..a9d783a 100644
--- a/usr/src/uts/common/io/drm/drm_context.c
+++ b/usr/src/uts/common/io/drm/drm_context.c
@@ -1,13 +1,22 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
/*
- * drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ * Copyright (c) 2006, 2013 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
*/
+
/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -30,418 +39,437 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * ChangeLog:
+ * 2001-11-16 Torsten Duwe <duwe@caldera.de>
+ * added context constructor/destructor hooks,
+ * needed by SiS driver's memory management.
+ */
#include "drmP.h"
#include "drm_io32.h"
-static inline int
-find_first_zero_bit(volatile void *p, int max)
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Removes the entry specified by \p ctx_handle from drm_device::ctx_idr,
+ * while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
- int b;
- volatile int *ptr = (volatile int *)p;
-
- for (b = 0; b < max; b += 32) {
- if (ptr[b >> 5] != ~0) {
- for (;;) {
- if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
- return (b);
- b++;
- }
- }
- }
- return (max);
+ mutex_lock(&dev->struct_mutex);
+ (void) idr_remove(&dev->ctx_idr, ctx_handle);
+ mutex_unlock(&dev->struct_mutex);
}
-/*
- * Context bitmap support
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocates a new id from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
*/
-void
-drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
+static int drm_ctxbitmap_next(struct drm_device * dev)
{
- if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
- dev->ctx_bitmap == NULL) {
- DRM_ERROR("drm_ctxbitmap_free: Attempt to free\
- invalid context handle: %d\n",
- ctx_handle);
- return;
- }
+ int new_id;
+ int ret;
- DRM_LOCK();
- clear_bit(ctx_handle, dev->ctx_bitmap);
- dev->context_sareas[ctx_handle] = NULL;
- DRM_UNLOCK();
+again:
+ if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("Out of memory expanding drawable idr\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&dev->struct_mutex);
+ ret = idr_get_new_above(&dev->ctx_idr, NULL,
+ DRM_RESERVED_CONTEXTS, &new_id);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+
+ return new_id;
}
-/* Is supposed to return -1 if any error by calling functions */
-int
-drm_ctxbitmap_next(drm_device_t *dev)
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Initialises drm_device::ctx_idr.
+ */
+int drm_ctxbitmap_init(struct drm_device * dev)
{
- int bit;
+ idr_init(&dev->ctx_idr);
+ return 0;
+}
- if (dev->ctx_bitmap == NULL)
- return (-1);
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Frees all entries in drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
+{
+ mutex_lock(&dev->struct_mutex);
+ idr_remove_all(&dev->ctx_idr);
+ mutex_unlock(&dev->struct_mutex);
+}
- DRM_LOCK();
- bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
- if (bit >= DRM_MAX_CTXBITMAP) {
- DRM_UNLOCK();
- return (-1);
- }
+/*@}*/
- set_bit(bit, dev->ctx_bitmap);
- DRM_DEBUG("drm_ctxbitmap_next: bit : %d", bit);
- if ((bit+1) > dev->max_context) {
- dev->max_context = (bit+1);
- if (dev->context_sareas != NULL) {
- drm_local_map_t **ctx_sareas;
- ctx_sareas = drm_realloc(dev->context_sareas,
- (dev->max_context - 1) *
- sizeof (*dev->context_sareas),
- dev->max_context *
- sizeof (*dev->context_sareas),
- DRM_MEM_MAPS);
- if (ctx_sareas == NULL) {
- clear_bit(bit, dev->ctx_bitmap);
- DRM_UNLOCK();
- return (-1);
- }
- dev->context_sareas = ctx_sareas;
- dev->context_sareas[bit] = NULL;
- } else {
- /* max_context == 1 at this point */
- dev->context_sareas = drm_alloc(dev->max_context *
- sizeof (*dev->context_sareas), KM_NOSLEEP);
- if (dev->context_sareas == NULL) {
- clear_bit(bit, dev->ctx_bitmap);
- DRM_UNLOCK();
- return (-1);
- }
- dev->context_sareas[bit] = NULL;
- }
- }
- DRM_UNLOCK();
- DRM_DEBUG("drm_ctxbitmap_next: return %d", bit);
- return (bit);
-}
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
-int
-drm_ctxbitmap_init(drm_device_t *dev)
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Looks up the map in drm_device::ctx_idr for the given context id and
+ * returns the map's user_token as the handle.
+ */
+/* LINTED */
+int drm_getsareactx(DRM_IOCTL_ARGS)
{
- int i;
- int temp;
+ struct drm_ctx_priv_map *request = data;
+ struct drm_local_map *map;
+ struct drm_map_list *_entry;
- DRM_LOCK();
- dev->ctx_bitmap = drm_calloc(1, DRM_PAGE_SIZE, DRM_MEM_CTXBITMAP);
- if (dev->ctx_bitmap == NULL) {
- DRM_UNLOCK();
- return (ENOMEM);
+ mutex_lock(&dev->struct_mutex);
+
+ map = idr_find(&dev->ctx_idr, request->ctx_id);
+ if (!map) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
- dev->context_sareas = NULL;
- dev->max_context = -1;
- DRM_UNLOCK();
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- temp = drm_ctxbitmap_next(dev);
- DRM_DEBUG("drm_ctxbitmap_init : %d", temp);
+ request->handle = NULL;
+ list_for_each_entry(_entry, struct drm_map_list, &dev->maplist, head) {
+ if (_entry->map == map) {
+ request->handle =
+ (void *)(unsigned long)_entry->user_token;
+ break;
+ }
}
- return (0);
-}
-void
-drm_ctxbitmap_cleanup(drm_device_t *dev)
-{
- DRM_LOCK();
- if (dev->context_sareas != NULL)
- drm_free(dev->context_sareas,
- sizeof (*dev->context_sareas) *
- dev->max_context,
- DRM_MEM_MAPS);
- drm_free(dev->ctx_bitmap, DRM_PAGE_SIZE, DRM_MEM_CTXBITMAP);
- DRM_UNLOCK();
+ mutex_unlock(&dev->struct_mutex);
+
+ if (request->handle == NULL)
+ return -EINVAL;
+
+ return 0;
}
-/*
- * Per Context SAREA Support
+/**
+ * Set per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping specified in \p arg and updates the entry in
+ * drm_device::ctx_idr with it.
*/
-/*ARGSUSED*/
-int
-drm_getsareactx(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_setsareactx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_priv_map_t request;
- drm_local_map_t *map;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_priv_map_32_t request32;
- DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
- sizeof (drm_ctx_priv_map_32_t));
- request.ctx_id = request32.ctx_id;
- request.handle = (void *)(uintptr_t)request32.handle;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
- sizeof (request));
-
- DRM_LOCK();
- if (dev->max_context < 0 || request.ctx_id >= (unsigned)
- dev->max_context) {
- DRM_UNLOCK();
- return (EINVAL);
+ struct drm_ctx_priv_map *request = data;
+ struct drm_local_map *map = NULL;
+ struct drm_map_list *r_list = NULL;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(r_list, struct drm_map_list, &dev->maplist, head) {
+ if (r_list->map
+ && r_list->user_token == (unsigned long) request->handle)
+ goto found;
}
+ bad:
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
- map = dev->context_sareas[request.ctx_id];
- DRM_UNLOCK();
-
+ found:
+ map = r_list->map;
if (!map)
- return (EINVAL);
-
- request.handle = map->handle;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_priv_map_32_t request32;
- request32.ctx_id = request.ctx_id;
- request32.handle = (caddr32_t)(uintptr_t)request.handle;
- DRM_COPYTO_WITH_RETURN((void *)data, &request32,
- sizeof (drm_ctx_priv_map_32_t));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data,
- &request, sizeof (request));
-
- return (0);
-}
+ goto bad;
-/*ARGSUSED*/
-int
-drm_setsareactx(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_ctx_priv_map_t request;
- drm_local_map_t *map = NULL;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_priv_map_32_t request32;
-
- DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
- sizeof (drm_ctx_priv_map_32_t));
- request.ctx_id = request32.ctx_id;
- request.handle = (void *)(uintptr_t)request32.handle;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request,
- (void *)data, sizeof (request));
-
- DRM_LOCK();
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if (map->handle == request.handle) {
- if (dev->max_context < 0)
- goto bad;
- if (request.ctx_id >= (unsigned)dev->max_context)
- goto bad;
- dev->context_sareas[request.ctx_id] = map;
- DRM_UNLOCK();
- return (0);
- }
- }
+ if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
+ goto bad;
+
+ mutex_unlock(&dev->struct_mutex);
-bad:
- DRM_UNLOCK();
- return (EINVAL);
+ return 0;
}
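+
+/*
+ * Illustrative note: the handle exchanged by these two ioctls is the map's
+ * user_token from drm_device::maplist, not a kernel pointer.  Userspace
+ * normally reaches this through libdrm, roughly (hypothetical snippet):
+ *
+ *	drm_handle_t handle;
+ *	(void) drmGetContextPrivateMapping(fd, ctx_id, &handle);
+ */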
-/*
- * The actual DRM context handling routines
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
*/
-int
-drm_context_switch(drm_device_t *dev, int old, int new)
+static int drm_context_switch(struct drm_device * dev, int old, int new)
{
if (test_and_set_bit(0, &dev->context_flag)) {
- DRM_ERROR("drm_context_switch: Reentering -- FIXME");
- return (EBUSY);
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
}
- DRM_DEBUG("drm_context_switch: Context switch from %d to %d",
- old, new);
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
- return (0);
+ return 0;
}
- return (0);
+ return 0;
}
-int
-drm_context_switch_complete(drm_device_t *dev, int new)
+/**
+ * Complete context switch.
+ *
+ * \param dev DRM device.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
+ * hardware lock is held, clears the drm_device::context_flag and wakes up
+ * drm_device::context_wait.
+ */
+static int drm_context_switch_complete(struct drm_device *dev,
+ struct drm_file *file_priv, int new)
{
- dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR(
- "drm_context_switch_complete: Lock not held");
+ if (file_priv->master->lock.hw_lock != NULL &&
+ !_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
}
- /*
- * If a context switch is ever initiated
- * when the kernel holds the lock, release
- * that lock here.
- */
+
+	/*
+	 * If a context switch is ever initiated when the kernel holds the
+	 * lock, release that lock here.
+	 */
clear_bit(0, &dev->context_flag);
- return (0);
+ return 0;
}
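+
+/*
+ * Note (illustrative): the core only bookkeeps last_context/last_switch and
+ * the context_flag bit here; any hardware context switch is left to the
+ * driver, so drm_context_switch() above returns once the bookkeeping is done.
+ */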
-/*ARGSUSED*/
-int
-drm_resctx(DRM_IOCTL_ARGS)
+/**
+ * Reserve contexts.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+/* LINTED */
+int drm_resctx(DRM_IOCTL_ARGS)
{
- drm_ctx_res_t res;
- drm_ctx_t ctx;
+ struct drm_ctx_res *res = data;
+ struct drm_ctx ctx;
int i;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_res_32_t res32;
- DRM_COPYFROM_WITH_RETURN(&res32, (void *)data, sizeof (res32));
- res.count = res32.count;
- res.contexts = (drm_ctx_t *)(uintptr_t)res32.contexts;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&res, (void *)data, sizeof (res));
-
- if (res.count >= DRM_RESERVED_CONTEXTS) {
- bzero(&ctx, sizeof (ctx));
+ if (res->count >= DRM_RESERVED_CONTEXTS) {
+ (void) memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
- DRM_COPYTO_WITH_RETURN(&res.contexts[i],
+ DRM_COPYTO_WITH_RETURN(&res->contexts[i],
&ctx, sizeof (ctx));
}
}
- res.count = DRM_RESERVED_CONTEXTS;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_ctx_res_32_t res32;
- res32.count = res.count;
- res32.contexts = (caddr32_t)(uintptr_t)res.contexts;
+ res->count = DRM_RESERVED_CONTEXTS;
- DRM_COPYTO_WITH_RETURN((void *)data, &res32,
- sizeof (drm_ctx_res_32_t));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &res, sizeof (res));
-
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_addctx(DRM_IOCTL_ARGS)
+/**
+ * Add context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+/* LINTED */
+int drm_addctx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ struct drm_ctx_list *ctx_entry;
+ struct drm_ctx *ctx = data;
- ctx.handle = drm_ctxbitmap_next(dev);
- if (ctx.handle == DRM_KERNEL_CONTEXT) {
+ ctx->handle = drm_ctxbitmap_next(dev);
+ if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
- ctx.handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_ctxbitmap_next(dev);
}
- if (ctx.handle == (drm_context_t)-1) {
- return (ENOMEM);
+ DRM_DEBUG("%d\n", ctx->handle);
+ /* LINTED */
+ if (ctx->handle == -1) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return -ENOMEM;
}
- if (dev->driver->context_ctor && ctx.handle != DRM_KERNEL_CONTEXT) {
- dev->driver->context_ctor(dev, ctx.handle);
+ ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
+ if (!ctx_entry) {
+ DRM_DEBUG("out of memory\n");
+ return -ENOMEM;
}
- DRM_COPYTO_WITH_RETURN((void *)data, &ctx, sizeof (ctx));
+ INIT_LIST_HEAD(&ctx_entry->head);
+ ctx_entry->handle = ctx->handle;
+ ctx_entry->tag = file;
+
+ mutex_lock(&dev->ctxlist_mutex);
+ list_add(&ctx_entry->head, &dev->ctxlist, (caddr_t)ctx_entry);
+ ++dev->ctx_count;
+ mutex_unlock(&dev->ctxlist_mutex);
- return (0);
+ return 0;
}
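+
+/*
+ * Illustrative only: userspace typically reaches drm_addctx()/drm_rmctx()
+ * through libdrm, e.g.
+ *
+ *	drm_context_t ctx;
+ *	if (drmCreateContext(fd, &ctx) == 0)
+ *		(void) drmDestroyContext(fd, ctx);
+ *
+ * which issue the ADD_CTX/RM_CTX ioctls handled here.
+ */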
-/*ARGSUSED*/
-int
-drm_modctx(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_modctx(DRM_IOCTL_ARGS)
{
/* This does nothing */
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_getctx(DRM_IOCTL_ARGS)
+/**
+ * Get context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ */
+/* LINTED */
+int drm_getctx(DRM_IOCTL_ARGS)
{
- drm_ctx_t ctx;
-
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ struct drm_ctx *ctx = data;
/* This is 0, because we don't handle any context flags */
- ctx.flags = 0;
+ ctx->flags = 0;
- DRM_COPYTO_WITH_RETURN((void *)data, &ctx, sizeof (ctx));
-
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_switchctx(DRM_IOCTL_ARGS)
+/**
+ * Switch context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch().
+ */
+/* LINTED */
+int drm_switchctx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ struct drm_ctx *ctx = data;
- DRM_DEBUG("drm_switchctx: %d", ctx.handle);
- return (drm_context_switch(dev, dev->last_context, ctx.handle));
+ DRM_DEBUG("%d\n", ctx->handle);
+ return drm_context_switch(dev, dev->last_context, ctx->handle);
}
-/*ARGSUSED*/
-int
-drm_newctx(DRM_IOCTL_ARGS)
+/**
+ * New context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch_complete().
+ */
+/* LINTED */
+int drm_newctx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ struct drm_ctx *ctx = data;
- DRM_DEBUG("drm_newctx: %d", ctx.handle);
- (void) drm_context_switch_complete(dev, ctx.handle);
+ DRM_DEBUG("%d\n", ctx->handle);
+ (void) drm_context_switch_complete(dev, file, ctx->handle);
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_rmctx(DRM_IOCTL_ARGS)
+/**
+ * Remove context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
+ */
+/* LINTED */
+int drm_rmctx(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
+ struct drm_ctx *ctx = data;
- DRM_COPYFROM_WITH_RETURN(&ctx, (void *)data, sizeof (ctx));
+ DRM_DEBUG("%d\n", ctx->handle);
+ if (ctx->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev, ctx->handle);
+ drm_ctxbitmap_free(dev, ctx->handle);
+ }
- DRM_DEBUG("drm_rmctx : %d", ctx.handle);
- if (ctx.handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor) {
- DRM_LOCK();
- dev->driver->context_dtor(dev, ctx.handle);
- DRM_UNLOCK();
- }
+ mutex_lock(&dev->ctxlist_mutex);
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
- drm_ctxbitmap_free(dev, ctx.handle);
+ list_for_each_entry_safe(pos, n, struct drm_ctx_list, &dev->ctxlist, head) {
+ if (pos->handle == ctx->handle) {
+ list_del(&pos->head);
+ kfree(pos, sizeof (*pos));
+ --dev->ctx_count;
+ }
+ }
}
+ mutex_unlock(&dev->ctxlist_mutex);
- return (0);
+ return 0;
}
+
+/*@}*/
diff --git a/usr/src/uts/common/io/drm/drm_crtc.c b/usr/src/uts/common/io/drm/drm_crtc.c
new file mode 100644
index 0000000..47976d7
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_crtc.c
@@ -0,0 +1,3944 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drm.h"
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "drm_fourcc.h"
+
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ mutex_lock(&crtc->mutex);
+}
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ mutex_unlock(&crtc->mutex);
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+/* Avoid boilerplate. I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list) \
+ const char *fnname(int val) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(list); i++) { \
+ if (list[i].type == val) \
+ return list[i].name; \
+ } \
+ return "(unknown)"; \
+ }
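+
+/*
+ * For reference, DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+ * below expands to a lookup helper equivalent to:
+ *
+ *	const char *drm_get_dpms_name(int val)
+ *	{
+ *		... scan drm_dpms_enum_list[] for .type == val ...
+ *		return the matching .name, or "(unknown)";
+ *	}
+ */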
+
+/*
+ * Global properties
+ */
+static const struct drm_prop_enum_list drm_dpms_enum_list[] =
+{ { DRM_MODE_DPMS_ON, "On" },
+ { DRM_MODE_DPMS_STANDBY, "Standby" },
+ { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+ { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/*
+ * Optional properties
+ */
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+ { DRM_MODE_SCALE_NONE, "None" },
+ { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+ { DRM_MODE_SCALE_CENTER, "Center" },
+ { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+ { DRM_MODE_DITHERING_OFF, "Off" },
+ { DRM_MODE_DITHERING_ON, "On" },
+ { DRM_MODE_DITHERING_AUTO, "Automatic" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+ drm_dvi_i_subconnector_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+ drm_tv_subconnector_enum_list)
+
+static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+ { DRM_MODE_DIRTY_OFF, "Off" },
+ { DRM_MODE_DIRTY_ON, "On" },
+ { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+struct drm_conn_prop_enum_list {
+ int type;
+ const char *name;
+ int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+ { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+ { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+ { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
+ { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+};
+
+static const struct drm_prop_enum_list drm_encoder_enum_list[] =
+{ { DRM_MODE_ENCODER_NONE, "None" },
+ { DRM_MODE_ENCODER_DAC, "DAC" },
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
+ { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+};
+
+const char *drm_get_encoder_name(const struct drm_encoder *encoder)
+{
+ static char buf[32];
+
+ (void) snprintf(buf, 32, "%s-%d",
+ drm_encoder_enum_list[encoder->encoder_type].name,
+ encoder->base.id);
+ return buf;
+}
+
+const char *drm_get_connector_name(const struct drm_connector *connector)
+{
+ static char buf[32];
+
+ (void) snprintf(buf, 32, "%s-%d",
+ drm_connector_enum_list[connector->connector_type].name,
+ connector->connector_type_id);
+ return buf;
+}
+
+const char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+ if (status == connector_status_connected)
+ return "connected";
+ else if (status == connector_status_disconnected)
+ return "disconnected";
+ else
+ return "unknown";
+}
+static char printable_char(int c)
+{
+ return (char)(c);
+}
+
+const char *drm_get_format_name(uint32_t format)
+{
+ static char buf[32];
+
+ (void) snprintf(buf, sizeof(buf),
+ "%c%c%c%c %s-endian (0x%08x)",
+ printable_char(format & 0xff),
+ printable_char((format >> 8) & 0xff),
+ printable_char((format >> 16) & 0xff),
+ printable_char((format >> 24) & 0x7f),
+ format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+ format);
+
+ return buf;
+}
+
+/**
+ * drm_mode_object_get - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
+ *
+ * Create a unique identifier based on @obj in @dev's identifier space. Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
+{
+ int new_id = 0;
+ int ret;
+
+again:
+ if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Ran out of memory getting a mode number\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+
+ if (!ret) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = new_id;
+ obj->type = obj_type;
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ if (ret == -EAGAIN)
+ goto again;
+ return ret;
+}
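+
+/*
+ * Every modeset object (CRTC, connector, encoder, plane, fb, mode) embeds a
+ * struct drm_mode_object and registers it through this helper; for example,
+ * drm_framebuffer_init() below calls
+ * drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB).
+ */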
+
+/**
+ * drm_mode_object_put - free an identifier
+ * @dev: DRM device
+ * @object: object whose identifier should be freed
+ *
+ * Free @object's identifier from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ (void) idr_remove(&dev->mode_config.crtc_idr, object->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * Note that framebuffers cannot be looked up with this function - since those
+ * are reference counted, they need special treatment.
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj = NULL;
+
+ /* Framebuffers are reference counted and need their own lookup
+ * function.*/
+ WARN_ON(type == DRM_MODE_OBJECT_FB);
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != type) || (obj->id != id))
+ obj = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return obj;
+}
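+
+/*
+ * Typical lookup pattern (see drm_mode_getcrtc() later in this file):
+ *
+ *	obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
+ *	if (obj != NULL)
+ *		crtc = obj_to_crtc(obj);
+ */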
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ *
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users. Which means by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ int ret;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ kref_init(&fb->refcount);
+ INIT_LIST_HEAD(&fb->filp_head);
+ fb->dev = dev;
+ fb->funcs = funcs;
+
+ ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+ if (ret)
+ goto out;
+
+ /* Grab the idr reference. */
+ drm_framebuffer_reference(fb);
+
+ dev->mode_config.num_fb++;
+ list_add(&fb->head, &dev->mode_config.fb_list, (caddr_t)fb);
+out:
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+	return ret;
+}
+
+static void drm_framebuffer_free(struct kref *kref)
+{
+ struct drm_framebuffer *fb =
+ container_of(kref, struct drm_framebuffer, refcount);
+ fb->funcs->destroy(fb);
+}
+
+static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *obj = NULL;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
+ fb = NULL;
+ else
+ fb = obj_to_fb(obj);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return fb;
+}
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again.
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+
+ fb = __drm_framebuffer_lookup(dev, id);
+ if (fb)
+ kref_get(&fb->refcount);
+
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return fb;
+}
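+
+/*
+ * Callers must balance the reference taken here, e.g.
+ *
+ *	fb = drm_framebuffer_lookup(dev, id);
+ *	if (fb != NULL) {
+ *		... use fb ...
+ *		drm_framebuffer_unreference(fb);
+ *	}
+ */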
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_put(&fb->refcount, drm_framebuffer_free);
+}
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer to take a reference on
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_get(&fb->refcount);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+static void drm_framebuffer_free_bug(struct kref *kref)
+{
+ BUG();
+}
+
+static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ DRM_DEBUG("FB ID: %d\n", fb->base.id);
+ kref_put(&fb->refcount, drm_framebuffer_free_bug);
+}
+
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ (void) idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ fb->base.id = 0;
+
+ __drm_framebuffer_unreference(fb);
+}
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped and drop idr ref. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ *
+ * Cleanup references to a user-created framebuffer. This function is intended
+ * to be used from the drivers ->destroy callback.
+ *
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config; if they are using @fb,
+ * removes it, setting it to NULL. Then drops the reference to the passed-in
+ * framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_mode_set set;
+ int ret;
+
+ WARN_ON(!list_empty(&fb->filp_head));
+
+ /*
+ * drm ABI mandates that we remove any deleted framebuffers from active
+	 * usage. But since most sane clients only remove framebuffers they no
+ * longer need, try to optimize this away.
+ *
+ * Since we're holding a reference ourselves, observing a refcount of 1
+ * means that we're the last holder and can skip it. Also, the refcount
+ * can never increase from 1 again, so we don't need any barriers or
+ * locks.
+ *
+	 * Note that userspace could try to race with us and instate a new
+ * usage _after_ we've cleared all current ones. End result will be an
+ * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+ * in this manner.
+ */
+ if (atomic_read(&fb->refcount.refcount) > 1) {
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb) {
+ /* should turn off the crtc */
+ (void) memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = drm_mode_set_config_internal(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", (void *)crtc);
+ }
+ }
+ list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb)
+ drm_plane_force_disable(plane);
+ }
+ drm_modeset_unlock_all(dev);
+ }
+
+ drm_framebuffer_unreference(fb);
+}
+
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ *
+ * Inits a new object created as base part of a driver crtc object.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs)
+{
+ int ret;
+
+ crtc->dev = dev;
+ crtc->funcs = funcs;
+ crtc->invert_dimensions = false;
+
+ drm_modeset_lock_all(dev);
+ mutex_init(&crtc->mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_lock(&crtc->mutex);
+
+ ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+ if (ret)
+ goto out;
+
+ crtc->base.properties = &crtc->properties;
+
+ list_add_tail(&crtc->head, &dev->mode_config.crtc_list, (caddr_t)crtc);
+ dev->mode_config.num_crtc++;
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
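+
+/*
+ * Illustrative sketch (hypothetical names): drivers embed struct drm_crtc in
+ * their own CRTC structure and initialize it with their callbacks, e.g.
+ *
+ *	struct my_crtc {
+ *		struct drm_crtc base;
+ *		...
+ *	};
+ *
+ *	(void) drm_crtc_init(dev, &my_crtc->base, &my_crtc_funcs);
+ */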
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ *
+ * Cleanup @crtc. Removes it from the drm modesetting space;
+ * does NOT free the object, the caller does that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ if (crtc->gamma_store) {
+ kfree(crtc->gamma_store, crtc->gamma_size * sizeof(uint16_t) * 3);
+ crtc->gamma_store = NULL;
+ }
+
+ drm_mode_object_put(dev, &crtc->base);
+ list_del(&crtc->head);
+ dev->mode_config.num_crtc--;
+}
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode is being added to
+ * @mode: mode data
+ *
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_add_tail(&mode->head, &connector->probed_modes, (caddr_t)mode);
+}
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_del(&mode->head);
+ drm_mode_destroy(connector->dev, mode);
+}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
+{
+ int ret;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ if (ret)
+ goto out;
+
+ connector->base.properties = &connector->properties;
+ connector->dev = dev;
+ connector->funcs = funcs;
+ connector->connector_type = connector_type;
+ connector->connector_type_id =
+ ++drm_connector_enum_list[connector_type].count; /* TODO */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ INIT_LIST_HEAD(&connector->modes);
+ connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
+
+ list_add_tail(&connector->head, &dev->mode_config.connector_list, (caddr_t)connector);
+ dev->mode_config.num_connector++;
+
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.edid_property,
+ 0);
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.dpms_property, 0);
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, struct drm_display_mode, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, struct drm_display_mode, &connector->modes, head)
+ drm_mode_remove(connector, mode);
+
+ drm_mode_object_put(dev, &connector->base);
+ list_del(&connector->head);
+ dev->mode_config.num_connector--;
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+void drm_connector_unplug_all(struct drm_device *dev)
+{
+ /*
+ struct drm_connector *connector;
+ */
+ /* taking the mode config mutex ends up in a clash with sysfs */
+ /*
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ drm_sysfs_connector_remove(connector);
+ */
+}
+
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type)
+{
+ int ret;
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ if (ret)
+ goto out;
+
+ encoder->dev = dev;
+ encoder->encoder_type = encoder_type;
+ encoder->funcs = funcs;
+
+ list_add_tail(&encoder->head, &dev->mode_config.encoder_list, (caddr_t)encoder);
+ dev->mode_config.num_encoder++;
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ drm_modeset_lock_all(dev);
+ drm_mode_object_put(dev, &encoder->base);
+ list_del(&encoder->head);
+ dev->mode_config.num_encoder--;
+ drm_modeset_unlock_all(dev);
+}
+
+/**
+ * drm_plane_init - Initialise a new plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @priv: plane is private (hidden from userspace)?
+ *
+ * Inits a new object created as base part of a driver plane object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv)
+{
+ int ret;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ if (ret)
+ goto out;
+
+ plane->base.properties = &plane->properties;
+ plane->dev = dev;
+ plane->funcs = funcs;
+ plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+ GFP_KERNEL);
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_put(dev, &plane->base);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ (void) memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = (uint32_t) possible_crtcs;
+
+ /* private planes are not exposed to userspace, but depending on
+ * display hardware, might be convenient to allow sharing programming
+ * for the scanout engine with the crtc implementation.
+ */
+ if (!priv) {
+ list_add_tail(&plane->head, &dev->mode_config.plane_list, (caddr_t)plane);
+ dev->mode_config.num_plane++;
+ } else {
+ INIT_LIST_HEAD(&plane->head);
+ }
+
+ out:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ drm_modeset_lock_all(dev);
+ kfree(plane->format_types, sizeof(uint32_t) * plane->format_count);
+ drm_mode_object_put(dev, &plane->base);
+ /* if not added to a list, it must be a private plane */
+ if (!list_empty(&plane->head)) {
+ list_del(&plane->head);
+ dev->mode_config.num_plane--;
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+ int ret;
+
+ if (!plane->fb)
+ return;
+
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ __drm_framebuffer_unreference(plane->fb);
+ plane->fb = NULL;
+ plane->crtc = NULL;
+}
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+ if (!nmode)
+ return NULL;
+
+ if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+ kfree(nmode, sizeof(struct drm_display_mode));
+ return NULL;
+ }
+
+ return nmode;
+}
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ if (!mode)
+ return;
+
+ drm_mode_object_put(dev, &mode->base);
+
+ kfree(mode, sizeof(struct drm_display_mode));
+}
+
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+ struct drm_property *edid;
+ struct drm_property *dpms;
+
+ /*
+ * Standard properties (apply to all connectors)
+ */
+ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "EDID", 0);
+ dev->mode_config.edid_property = edid;
+
+ dpms = drm_property_create_enum(dev, 0,
+ "DPMS", drm_dpms_enum_list,
+ ARRAY_SIZE(drm_dpms_enum_list));
+ dev->mode_config.dpms_property = dpms;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+ struct drm_property *dvi_i_selector;
+ struct drm_property *dvi_i_subconnector;
+
+ if (dev->mode_config.dvi_i_select_subconnector_property)
+ return 0;
+
+ dvi_i_selector =
+ drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_dvi_i_select_enum_list,
+ ARRAY_SIZE(drm_dvi_i_select_enum_list));
+ dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+ dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_dvi_i_subconnector_enum_list,
+ ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+ dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+ return 0;
+}
+
+/**
+ * drm_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device. Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+ char *modes[])
+{
+ struct drm_property *tv_selector;
+ struct drm_property *tv_subconnector;
+ int i;
+
+ if (dev->mode_config.tv_select_subconnector_property)
+ return 0;
+
+ /*
+ * Basic connector properties
+ */
+ tv_selector = drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_tv_select_enum_list,
+ ARRAY_SIZE(drm_tv_select_enum_list));
+ dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+ tv_subconnector =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_tv_subconnector_enum_list,
+ ARRAY_SIZE(drm_tv_subconnector_enum_list));
+ dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+ /*
+ * Other, TV specific properties: margins & TV modes.
+ */
+ dev->mode_config.tv_left_margin_property =
+ drm_property_create_range(dev, 0, "left margin", 0, 100);
+
+ dev->mode_config.tv_right_margin_property =
+ drm_property_create_range(dev, 0, "right margin", 0, 100);
+
+ dev->mode_config.tv_top_margin_property =
+ drm_property_create_range(dev, 0, "top margin", 0, 100);
+
+ dev->mode_config.tv_bottom_margin_property =
+ drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+
+ dev->mode_config.tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ for (i = 0; i < num_modes; i++)
+ (void) drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+ i, modes[i]);
+
+ dev->mode_config.tv_brightness_property =
+ drm_property_create_range(dev, 0, "brightness", 0, 100);
+
+ dev->mode_config.tv_contrast_property =
+ drm_property_create_range(dev, 0, "contrast", 0, 100);
+
+ dev->mode_config.tv_flicker_reduction_property =
+ drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+
+ dev->mode_config.tv_overscan_property =
+ drm_property_create_range(dev, 0, "overscan", 0, 100);
+
+ dev->mode_config.tv_saturation_property =
+ drm_property_create_range(dev, 0, "saturation", 0, 100);
+
+ dev->mode_config.tv_hue_property =
+ drm_property_create_range(dev, 0, "hue", 0, 100);
+
+ return 0;
+}
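+
+/*
+ * Illustrative only (hypothetical mode list): a TV-out driver passes its
+ * format names when creating these properties, e.g.
+ *
+ *	static char *tv_format_names[] = { "NTSC-M", "PAL-B" };
+ *	(void) drm_mode_create_tv_properties(dev,
+ *	    ARRAY_SIZE(tv_format_names), tv_format_names);
+ */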
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+ struct drm_property *scaling_mode;
+
+ if (dev->mode_config.scaling_mode_property)
+ return 0;
+
+ scaling_mode =
+ drm_property_create_enum(dev, 0, "scaling mode",
+ drm_scaling_mode_enum_list,
+ ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+ dev->mode_config.scaling_mode_property = scaling_mode;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+ struct drm_property *dithering_mode;
+
+ if (dev->mode_config.dithering_mode_property)
+ return 0;
+
+ dithering_mode =
+ drm_property_create_enum(dev, 0, "dithering",
+ drm_dithering_mode_enum_list,
+ ARRAY_SIZE(drm_dithering_mode_enum_list));
+ dev->mode_config.dithering_mode_property = dithering_mode;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+ struct drm_property *dirty_info;
+
+ if (dev->mode_config.dirty_info_property)
+ return 0;
+
+ dirty_info =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "dirty",
+ drm_dirty_info_enum_list,
+ ARRAY_SIZE(drm_dirty_info_enum_list));
+ dev->mode_config.dirty_info_property = dirty_info;
+
+ return 0;
+}
+
+
+static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+ uint32_t total_objects = 0;
+
+ total_objects += dev->mode_config.num_crtc;
+ total_objects += dev->mode_config.num_connector;
+ total_objects += dev->mode_config.num_encoder;
+
+ group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+ if (!group->id_list)
+ return -ENOMEM;
+
+ group->num_crtcs = 0;
+ group->num_connectors = 0;
+ group->num_encoders = 0;
+ return 0;
+}
+
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+ struct drm_mode_group *group)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ if ((ret = drm_mode_group_init(dev, group)))
+ return ret;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ group->id_list[group->num_crtcs++] = crtc->base.id;
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders++] =
+ encoder->base.id;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders +
+ group->num_connectors++] = connector->base.id;
+
+ return 0;
+}
+
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+ const struct drm_display_mode *in)
+{
+ if (in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
+ in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
+ in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
+ in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
+ in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX)
+ DRM_ERROR("timing values too large for mode info\n");
+
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ (void) strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to return to the user
+ * @in: drm_mode_modeinfo to use
+ *
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ */
+static int drm_crtc_convert_umode(struct drm_display_mode *out,
+ const struct drm_mode_modeinfo *in)
+{
+ if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+ return -ERANGE;
+
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ (void) strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+
+ return 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getresources(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0, i;
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+ struct drm_mode_group *mode_group;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&file->fbs_lock);
+
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file->fbs)
+ fb_count++;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, struct drm_framebuffer, &file->fbs, filp_head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ mutex_unlock(&file->fbs_lock);
+ return -EFAULT;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+ mutex_unlock(&file->fbs_lock);
+
+ drm_modeset_lock_all(dev);
+
+ mode_group = &file->master->minor->mode_group;
+ if (file->master->minor->type == DRM_MINOR_CONTROL) {
+
+ list_for_each(lh, &dev->mode_config.crtc_list)
+ crtc_count++;
+
+ list_for_each(lh, &dev->mode_config.connector_list)
+ connector_count++;
+
+ list_for_each(lh, &dev->mode_config.encoder_list)
+ encoder_count++;
+ } else {
+
+ crtc_count = mode_group->num_crtcs;
+ connector_count = mode_group->num_connectors;
+ encoder_count = mode_group->num_encoders;
+ }
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+ if (file->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list,
+ head) {
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ for (i = 0; i < mode_group->num_crtcs; i++) {
+ if (put_user(mode_group->id_list[i],
+ crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+ if (file->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(encoder, struct drm_encoder,
+ &dev->mode_config.encoder_list,
+ head) {
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ drm_get_encoder_name(encoder));
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+ if (put_user(mode_group->id_list[i],
+ encoder_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+
+ }
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+ if (file->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(connector, struct drm_connector,
+ &dev->mode_config.connector_list,
+ head) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ int start = mode_group->num_crtcs +
+ mode_group->num_encoders;
+ for (i = start; i < start + mode_group->num_connectors; i++) {
+ if (put_user(mode_group->id_list[i],
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ card_res->count_connectors = connector_count;
+
+ DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
+ card_res->count_connectors, card_res->count_encoders);
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
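+
+/*
+ * Note: like the connector ioctl below, userspace calls this twice - first
+ * with zero counts to learn how many IDs exist, then again with buffers sized
+ * from those counts (libdrm's drmModeGetResources() wraps this pattern).
+ */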
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getcrtc(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_crtc *crtc_resp = data;
+ struct drm_crtc *crtc;
+ struct drm_mode_object *obj;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ crtc_resp->x = crtc->x;
+ crtc_resp->y = crtc->y;
+ crtc_resp->gamma_size = crtc->gamma_size;
+ if (crtc->fb)
+ crtc_resp->fb_id = crtc->fb->base.id;
+ else
+ crtc_resp->fb_id = 0;
+
+ if (crtc->enabled) {
+
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+ crtc_resp->mode_valid = 1;
+
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ *
+ * Construct a connector configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getconnector(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_get_connector *out_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ int mode_count = 0;
+ int props_count = 0;
+ int encoders_count = 0;
+ int ret = 0;
+ int copied = 0;
+ int i;
+ struct drm_mode_modeinfo u_mode;
+ struct drm_mode_modeinfo __user *mode_ptr;
+ uint32_t __user *prop_ptr;
+ uint64_t __user *prop_values;
+ uint32_t __user *encoder_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ (void) memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, out_resp->connector_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ props_count = connector->properties.count;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ encoders_count++;
+ }
+ }
+
+ if (out_resp->count_modes == 0) {
+ connector->funcs->fill_modes(connector,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ }
+
+ /* delayed so we get modes regardless of pre-fill_modes state */
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head)
+ mode_count++;
+
+ out_resp->connector_id = connector->base.id;
+ out_resp->connector_type = connector->connector_type;
+ out_resp->connector_type_id = connector->connector_type_id;
+ out_resp->mm_width = connector->display_info.width_mm;
+ out_resp->mm_height = connector->display_info.height_mm;
+ out_resp->subpixel = connector->display_info.subpixel_order;
+ out_resp->connection = connector->status;
+ if (connector->encoder)
+ out_resp->encoder_id = connector->encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if ((out_resp->count_modes >= mode_count) && mode_count) {
+ copied = 0;
+ mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ drm_crtc_convert_to_umode(&u_mode, mode);
+ if (DRM_COPY_TO_USER(mode_ptr + copied,
+ &u_mode, sizeof(u_mode))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_modes = mode_count;
+
+ if ((out_resp->count_props >= props_count) && props_count) {
+ copied = 0;
+ prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+ prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+ for (i = 0; i < connector->properties.count; i++) {
+ if (put_user(connector->properties.ids[i],
+ prop_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (put_user(connector->properties.values[i],
+ prop_values + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_props = props_count;
+
+ if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ copied = 0;
+ encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ if (put_user(connector->encoder_ids[i],
+ encoder_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_encoders = encoders_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getencoder(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_get_encoder *enc_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ encoder = obj_to_encoder(obj);
+
+ if (encoder->crtc)
+ enc_resp->crtc_id = encoder->crtc->base.id;
+ else
+ enc_resp->crtc_id = 0;
+ enc_resp->encoder_type = encoder->encoder_type;
+ enc_resp->encoder_id = encoder->base.id;
+ enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane_res - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ *
+ * Return a plane count and set of IDs.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getplane_res(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+ int copied = 0, ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ config = &dev->mode_config;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (config->num_plane &&
+ (plane_resp->count_planes >= config->num_plane)) {
+ plane_ptr = (uint32_t *)(unsigned long)plane_resp->plane_id_ptr;
+
+ list_for_each_entry(plane, struct drm_plane, &config->plane_list, head) {
+ if (put_user(plane->base.id, plane_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ plane_resp->count_planes = config->num_plane;
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_getplane(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ uint32_t *format_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, plane_resp->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+ if (DRM_COPY_TO_USER(format_ptr,
+ plane->format_types,
+ sizeof(uint32_t) * plane->format_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_setplane(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ return -ENOENT;
+ }
+ plane = obj_to_plane(obj);
+
+ /* No fb means shut it down */
+ if (!plane_req->fb_id) {
+ drm_modeset_lock_all(dev);
+ old_fb = plane->fb;
+ plane->funcs->disable_plane(plane);
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ drm_modeset_unlock_all(dev);
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
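+ /*
+ * src_{x,y,w,h} are 16.16 fixed point; the debug print below turns
+ * the fractional part into micro-units via (frac * 15625) >> 10,
+ * which equals frac * 1000000 / 65536.
+ */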
+ if (plane_req->src_w > fb_width ||
+ plane_req->src_x > fb_width - plane_req->src_w ||
+ plane_req->src_h > fb_height ||
+ plane_req->src_y > fb_height - plane_req->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ plane_req->src_w >> 16,
+ ((plane_req->src_w & 0xffff) * 15625) >> 10,
+ plane_req->src_h >> 16,
+ ((plane_req->src_h & 0xffff) * 15625) >> 10,
+ plane_req->src_x >> 16,
+ ((plane_req->src_x & 0xffff) * 15625) >> 10,
+ plane_req->src_y >> 16,
+ ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ drm_modeset_lock_all(dev);
+ ret = plane->funcs->update_plane(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+ if (!ret) {
+ old_fb = plane->fb;
+ plane->crtc = crtc;
+ plane->fb = fb;
+ fb = NULL;
+ }
+ drm_modeset_unlock_all(dev);
+
+out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ return ret;
+}
+
+/**
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
+ *
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is correct refcounting dance.
+ */
+int drm_mode_set_config_internal(struct drm_mode_set *set)
+{
+ struct drm_crtc *crtc = set->crtc;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *tmp;
+ int ret;
+
+ /*
+ * NOTE: ->set_config can also disable other crtcs (if we steal all
+ * connectors from it), hence we need to refcount the fbs across all
+ * crtcs. Atomic modeset will have saner semantics ...
+ */
+ list_for_each_entry(tmp, struct drm_crtc, &crtc->dev->mode_config.crtc_list, head)
+ tmp->old_fb = tmp->fb;
+
+ fb = set->fb;
+ ret = crtc->funcs->set_config(set);
+ if (ret == 0) {
+ /* crtc->fb must be updated by ->set_config, enforce this. */
+ if (fb != crtc->fb)
+ DRM_ERROR("fb 0x%lx != crtc->fb 0x%lx", (uintptr_t)fb, (uintptr_t)crtc->fb);
+ }
+
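+ /*
+ * Balance the references saved above: take a reference on each
+ * CRTC's current fb and drop the reference on the fb it had before
+ * the ->set_config call.
+ */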
+ list_for_each_entry(tmp, struct drm_crtc, &crtc->dev->mode_config.crtc_list, head) {
+ if (tmp->fb)
+ drm_framebuffer_reference(tmp->fb);
+ if (tmp->old_fb)
+ drm_framebuffer_unreference(tmp->old_fb);
+ }
+
+ return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ *
+ * Build a new CRTC configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_setcrtc(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_mode_crtc *crtc_req = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct drm_mode_set set;
+ uint32_t __user *set_connectors_ptr;
+ int ret;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* For some reason crtc x/y offsets are signed internally. */
+ if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+ return -ERANGE;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ if (crtc_req->mode_valid) {
+ int hdisplay, vdisplay;
+ /* If we have a mode we need a framebuffer. */
+ /* If we pass -1, set the mode with the currently bound fb */
+ /* LINTED */
+ if (crtc_req->fb_id == -1) {
+ if (!crtc->fb) {
+ DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ fb = crtc->fb;
+ /* Make refcounting symmetric with the lookup path. */
+ drm_framebuffer_reference(fb);
+ } else {
+ fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown FB ID%d\n",
+ crtc_req->fb_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
+ if (ret) {
+ DRM_DEBUG_KMS("Invalid mode\n");
+ goto out;
+ }
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+
+ hdisplay = mode->hdisplay;
+ vdisplay = mode->vdisplay;
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc_req->x > fb->width - hdisplay ||
+ crtc_req->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height,
+ hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ ret = -ENOSPC;
+ goto out;
+ }
+ }
+
+ if (crtc_req->count_connectors == 0 && mode) {
+ DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+ DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+ crtc_req->count_connectors);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0) {
+ u32 out_id;
+
+ /* Avoid unbounded kernel memory allocation */
+ if (crtc_req->count_connectors > config->num_connector) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ connector_set = kmalloc(crtc_req->count_connectors *
+ sizeof(struct drm_connector *),
+ GFP_KERNEL);
+ if (!connector_set) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < crtc_req->count_connectors; i++) {
+ set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+ if (get_user(out_id, &set_connectors_ptr[i])) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, out_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ DRM_DEBUG_KMS("Connector id %d unknown\n",
+ out_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
+
+ connector_set[i] = connector;
+ }
+ }
+
+ set.crtc = crtc;
+ set.x = crtc_req->x;
+ set.y = crtc_req->y;
+ set.mode = mode;
+ set.connectors = connector_set;
+ set.num_connectors = crtc_req->count_connectors;
+ set.fb = fb;
+ ret = drm_mode_set_config_internal(&set);
+
+out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ kfree(connector_set, crtc_req->count_connectors * sizeof(struct drm_connector *));
+ drm_mode_destroy(dev, mode);
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+static int drm_mode_cursor_common(struct drm_device *dev,
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+ return -EINVAL;
+
+ obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+ return -EINVAL;
+ }
+ crtc = obj_to_crtc(obj);
+
+ mutex_lock(&crtc->mutex);
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
+ ret = -ENXIO;
+ goto out;
+ }
+ /* Turns off the cursor if handle is 0 */
+ if (crtc->funcs->cursor_set2)
+ ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+ req->width, req->height, req->hot_x, req->hot_y);
+ else
+ ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+ req->width, req->height);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ if (crtc->funcs->cursor_move) {
+ ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+ } else {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&crtc->mutex);
+
+ return ret;
+
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_cursor_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_cursor *req = data;
+ struct drm_mode_cursor2 new_req;
+
+ (void) memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+ new_req.hot_x = new_req.hot_y = 0;
+
+ return drm_mode_cursor_common(dev, &new_req, file);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_cursor2_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_cursor2 *req = data;
+ return drm_mode_cursor_common(dev, req, file);
+}
+
+/* Original addfb only supported RGB formats, so figure out which one */
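+/* e.g. bpp=32/depth=24 yields DRM_FORMAT_XRGB8888, bpp=16/depth=16 yields DRM_FORMAT_RGB565 */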
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_C8;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ *
+ * Add a new FB to the specified CRTC, given a user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_addfb(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ (void) memset(&r, 0, sizeof(struct drm_mode_fb_cmd2));
+
+ /* Use new struct with format internally */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if ((config->min_width > r.width) || (r.width > config->max_width))
+ return -EINVAL;
+
+ if ((config->min_height > r.height) || (r.height > config->max_height))
+ return -EINVAL;
+
+ fb = dev->mode_config.funcs->fb_create(dev, file, &r);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&file->fbs_lock);
+ or->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file->fbs, (caddr_t)fb);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file->fbs_lock);
+
+ return ret;
+}
+
+static int format_check(const struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+ int ret, hsub, vsub, num_planes, i;
+
+ ret = format_check(r);
+ if (ret) {
+ DRM_DEBUG_KMS("bad framebuffer format %s\n",
+ drm_get_format_name(r->pixel_format));
+ return ret;
+ }
+
+ hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ return -EINVAL;
+ }
+
+ if (r->height == 0 || r->height % vsub) {
+ DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ return -EINVAL;
+ }
+
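+ /*
+ * Plane 0 is full resolution; additional (chroma) planes are scaled
+ * down by the subsampling factors computed above.
+ */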
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+ if (!r->handles[i]) {
+ DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
+ DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @arg: arg from ioctl
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_addfb2(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return -EINVAL;
+ }
+
+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
+ return -EINVAL;
+ }
+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
+ return -EINVAL;
+ }
+
+ ret = framebuffer_check(r);
+ if (ret)
+ return ret;
+
+ fb = dev->mode_config.funcs->fb_create(dev, file, r);
+ if (fb == NULL) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ return -ENOMEM;
+ }
+
+ mutex_lock(&file->fbs_lock);
+ r->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file->fbs, (caddr_t)fb);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ mutex_unlock(&file->fbs_lock);
+
+ return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @arg: arg from ioctl
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED */
+int drm_mode_rmfb(DRM_IOCTL_ARGS)
+{
+ struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fbl = NULL;
+ uint32_t *id = data;
+ int found = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&file->fbs_lock);
+ mutex_lock(&dev->mode_config.fb_lock);
+ fb = __drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ goto fail_lookup;
+
+ list_for_each_entry(fbl, struct drm_framebuffer, &file->fbs, filp_head)
+ if (fb == fbl)
+ found = 1;
+
+ if (!found)
+ goto fail_lookup;
+
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file->fbs_lock);
+
+ drm_framebuffer_remove(fb);
+ return 0;
+
+fail_lookup:
+ mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&file->fbs_lock);
+
+ return -EINVAL;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @arg: arg from ioctl
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+/* LINTED */
+int drm_mode_getfb(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_fb_cmd *r = data;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->depth = fb->depth;
+ r->bpp = fb->bits_per_pixel;
+ r->pitch = fb->pitches[0];
+ if (fb->funcs->create_handle)
+ ret = fb->funcs->create_handle(fb, file, &r->handle);
+ else
+ ret = -ENODEV;
+
+ drm_framebuffer_unreference(fb);
+
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_dirtyfb_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_clip_rect __user *clips_ptr;
+ struct drm_clip_rect *clips = NULL;
+ struct drm_mode_fb_dirty_cmd *r = data;
+ struct drm_framebuffer *fb;
+ unsigned flags;
+ int num_clips;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -EINVAL;
+
+ num_clips = r->num_clips;
+ clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+ if (!num_clips != !clips_ptr) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+ /* If userspace annotates copy, clips must come in pairs */
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ if (num_clips && clips_ptr) {
+ if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+ clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ if (!clips) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = copy_from_user(clips, clips_ptr,
+ num_clips * sizeof(*clips));
+ if (ret) {
+ ret = -EFAULT;
+ goto out_err2;
+ }
+ }
+
+ if (fb->funcs->dirty) {
+ drm_modeset_lock_all(dev);
+ ret = fb->funcs->dirty(fb, file, flags, r->color,
+ clips, num_clips);
+ drm_modeset_unlock_all(dev);
+ } else {
+ ret = -ENOSYS;
+ }
+
+out_err2:
+ if (clips)
+ kfree(clips, num_clips * sizeof(*clips));
+out_err1:
+ drm_framebuffer_unreference(fb);
+ return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: DRM file whose framebuffers are being destroyed
+ *
+ * Destroy all the FBs associated with @priv. Called when the DRM file is
+ * released, not via ioctl, and returns nothing.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_framebuffer *fb, *tfb;
+
+ mutex_lock(&priv->fbs_lock);
+ list_for_each_entry_safe(fb, tfb, struct drm_framebuffer, &priv->fbs, filp_head) {
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+ __drm_framebuffer_unregister(dev, fb);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ list_del_init(&fb->filp_head);
+
+ /* This will also drop the fpriv->fbs reference. */
+ drm_framebuffer_remove(fb);
+ }
+ mutex_unlock(&priv->fbs_lock);
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values)
+{
+ struct drm_property *property = NULL;
+ int ret;
+
+ property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+ if (!property)
+ return NULL;
+
+ if (num_values) {
+ property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+ if (!property->values)
+ goto fail;
+ }
+
+ ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ if (ret)
+ goto fail;
+
+ property->flags = flags;
+ property->num_values = num_values;
+ INIT_LIST_HEAD(&property->enum_blob_list);
+
+ if (name) {
+ (void) strncpy(property->name, name, DRM_PROP_NAME_LEN);
+ property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ }
+
+ list_add_tail(&property->head, &dev->mode_config.property_list, (caddr_t)property);
+ return property;
+fail:
+ kfree(property->values, sizeof(uint64_t)*num_values);
+ kfree(property, sizeof(struct drm_property));
+ return NULL;
+}
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_ENUM;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_BITMASK;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max)
+{
+ struct drm_property *property;
+
+ flags |= DRM_MODE_PROP_RANGE;
+
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+
+ property->values[0] = min;
+ property->values[1] = max;
+
+ return property;
+}
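+
+/*
+ * Typical driver usage of the property helpers above (illustrative only,
+ * the property name and values are hypothetical):
+ *
+ *	prop = drm_property_create_range(dev, 0, "my-brightness", 0, 255);
+ *	if (prop != NULL)
+ *		drm_object_attach_property(&connector->base, prop, 128);
+ */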
+
+int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name)
+{
+ struct drm_property_enum *prop_enum;
+
+ if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+ if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
+ return -EINVAL;
+
+ if (!list_empty(&property->enum_blob_list)) {
+ list_for_each_entry(prop_enum, struct drm_property_enum, &property->enum_blob_list, head) {
+ if (prop_enum->value == value) {
+ (void) strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ return 0;
+ }
+ }
+ }
+
+ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+ if (!prop_enum)
+ return -ENOMEM;
+
+ (void) strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ prop_enum->value = value;
+
+ property->values[index] = value;
+ list_add_tail(&prop_enum->head, &property->enum_blob_list, (caddr_t)prop_enum);
+ return 0;
+}
+
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+ struct drm_property_enum *prop_enum, *pt;
+
+ list_for_each_entry_safe(prop_enum, pt, struct drm_property_enum, &property->enum_blob_list, head) {
+ list_del(&prop_enum->head);
+ kfree(prop_enum, sizeof(struct drm_property_enum));
+ }
+
+ if (property->num_values)
+ kfree(property->values, sizeof(uint64_t) * property->num_values);
+ drm_mode_object_put(dev, &property->base);
+ list_del(&property->head);
+ kfree(property, sizeof(struct drm_property));
+}
+
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+{
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ DRM_ERROR("Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
+ }
+
+ obj->properties->ids[count] = property->base.id;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
+}
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+{
+ int i;
+
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ obj->properties->values[i] = val;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ int i;
+
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ *val = obj->properties->values[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* LINTED */
+int drm_mode_getproperty_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_property *out_resp = data;
+ struct drm_property *property;
+ int enum_count = 0;
+ int blob_count = 0;
+ int value_count = 0;
+ int ret = 0, i;
+ int copied;
+ struct drm_property_enum *prop_enum;
+ struct drm_mode_property_enum __user *enum_ptr;
+ struct drm_property_blob *prop_blob;
+ uint32_t *blob_id_ptr;
+ uint64_t *values_ptr;
+ uint32_t *blob_length_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+ if (!obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ property = obj_to_property(obj);
+
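+ /*
+ * enum_blob_list holds enum entries for enum/bitmask properties and
+ * blob entries for blob properties; count_enum_blobs reports the
+ * count of whichever kind this property carries.
+ */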
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ list_for_each_entry(prop_enum, struct drm_property_enum, &property->enum_blob_list, head)
+ enum_count++;
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ list_for_each_entry(prop_blob, struct drm_property_blob, &property->enum_blob_list, head)
+ blob_count++;
+ }
+
+ value_count = property->num_values;
+
+ (void) strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+ out_resp->flags = property->flags;
+
+ if ((out_resp->count_values >= value_count) && value_count) {
+ values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+ for (i = 0; i < value_count; i++) {
+ if (DRM_COPY_TO_USER(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ out_resp->count_values = value_count;
+
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+ if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ copied = 0;
+ enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+ list_for_each_entry(prop_enum, struct drm_property_enum, &property->enum_blob_list, head) {
+
+ if (DRM_COPY_TO_USER(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (DRM_COPY_TO_USER(&enum_ptr[copied].name,
+ &prop_enum->name, DRM_PROP_NAME_LEN)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = enum_count;
+ }
+
+ if (property->flags & DRM_MODE_PROP_BLOB) {
+ if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+ copied = 0;
+ blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+
+ list_for_each_entry(prop_blob, struct drm_property_blob, &property->enum_blob_list, head) {
+ if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (put_user(prop_blob->length, blob_length_ptr + copied)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = blob_count;
+ }
+done:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+ void *data)
+{
+ struct drm_property_blob *blob;
+ int ret;
+
+ if (!length || !data)
+ return NULL;
+
+ blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ if (!blob)
+ return NULL;
+
+ ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+ if (ret) {
+ kfree(blob, sizeof(struct drm_property_blob)+length);
+ return NULL;
+ }
+
+ blob->length = length;
+
+ (void) memcpy(blob->data, data, length);
+
+ list_add_tail(&blob->head, &dev->mode_config.property_blob_list, (caddr_t)blob);
+ return blob;
+}
+
+static void drm_property_destroy_blob(struct drm_device *dev,
+ struct drm_property_blob *blob)
+{
+ drm_mode_object_put(dev, &blob->base);
+ list_del(&blob->head);
+ kfree(blob, sizeof(struct drm_property_blob) + blob->length);
+}
+
+/* LINTED */
+int drm_mode_getblob_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ int ret = 0;
+ void *blob_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+ if (!obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ blob = obj_to_blob(obj);
+
+ if (out_resp->length == blob->length) {
+ blob_ptr = (void *)(unsigned long)out_resp->data;
+ if (DRM_COPY_TO_USER(blob_ptr, blob->data, blob->length)){
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ out_resp->length = blob->length;
+
+done:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+
+ if (connector->edid_blob_ptr)
+ drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+ /* Delete edid, when there is none. */
+ if (!edid) {
+ connector->edid_blob_ptr = NULL;
+ ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
+ return ret;
+ }
+
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
+ if (!connector->edid_blob_ptr)
+ return -EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.edid_property,
+ connector->edid_blob_ptr->base.id);
+
+ return ret;
+}
+
+static bool drm_property_change_is_valid(struct drm_property *property,
+ uint64_t value)
+{
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else if (property->flags & DRM_MODE_PROP_BITMASK) {
+ int i;
+ uint64_t valid_mask = 0;
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ /* Only the driver knows */
+ return true;
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+ }
+}
+
+int drm_mode_connector_property_set_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev_id, dev, &obj_set_prop, file, ioctl_mode, credp);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int)value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ ret = drm_object_property_set_value(&connector->base, property, value);
+ return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->set_property)
+ ret = crtc->funcs->set_property(crtc, property, value);
+ if (!ret)
+ ret = drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+ if (!ret)
+ ret = drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_obj_get_properties_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
+ int i;
+ int copied = 0;
+ int props_count = 0;
+ uint32_t __user *props_ptr;
+ uint64_t __user *prop_values_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ props_count = obj->properties->count;
+
+ /* This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it. */
+ if ((arg->count_props >= props_count) && props_count) {
+ copied = 0;
+ props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ prop_values_ptr = (uint64_t __user *)(unsigned long)
+ (arg->prop_values_ptr);
+ for (i = 0; i < props_count; i++) {
+ if (put_user(obj->properties->ids[i],
+ props_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(obj->properties->values[i],
+ prop_values_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ arg->count_props = props_count;
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_obj_set_property_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_mode_object *prop_obj;
+ struct drm_property *property;
+ int ret = -EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj)
+ goto out;
+ if (!arg_obj->properties)
+ goto out;
+
+ for (i = 0; i < arg_obj->properties->count; i++)
+ if (arg_obj->properties->ids[i] == arg->prop_id)
+ break;
+
+ if (i == arg_obj->properties->count)
+ goto out;
+
+ prop_obj = drm_mode_object_find(dev, arg->prop_id,
+ DRM_MODE_OBJECT_PROPERTY);
+ if (!prop_obj)
+ goto out;
+ property = obj_to_property(prop_obj);
+
+ if (!drm_property_change_is_valid(property, arg->value))
+ goto out;
+
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ }
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0) {
+ connector->encoder_ids[i] = encoder->base.id;
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == encoder->base.id) {
+ connector->encoder_ids[i] = 0;
+ if (connector->encoder == encoder)
+ connector->encoder = NULL;
+ break;
+ }
+ }
+}
+
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size)
+{
+ crtc->gamma_size = gamma_size;
+
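+ /*
+ * gamma_store holds three consecutive arrays of gamma_size u16
+ * entries: red, then green, then blue; the gamma ioctls below index
+ * into it that way.
+ */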
+ crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+ if (!crtc->gamma_store) {
+ crtc->gamma_size = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* LINTED */
+int drm_mode_gamma_set_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (DRM_COPY_FROM_USER(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = (void *)((uintptr_t)r_base + size);
+ if (DRM_COPY_FROM_USER(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = (void *)((uintptr_t)g_base + size);
+ if (DRM_COPY_FROM_USER(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+
+}
+
+/* LINTED */
+int drm_mode_gamma_get_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (DRM_COPY_TO_USER((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = (void *)((long)r_base + size);
+ if (DRM_COPY_TO_USER((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = (void *)((long)g_base + size);
+ if (DRM_COPY_TO_USER((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/* LINTED */
+int drm_mode_page_flip_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_crtc_page_flip *page_flip = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+ int hdisplay, vdisplay;
+ int ret = -EINVAL;
+
+ if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ page_flip->reserved != 0)
+ return -EINVAL;
+
+ obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj)
+ return -EINVAL;
+ crtc = obj_to_crtc(obj);
+
+ mutex_lock(&crtc->mutex);
+ if (crtc->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+ * due to a hotplug event, that userspace has not
+ * yet discovered.
+ */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (crtc->funcs->page_flip == NULL)
+ goto out;
+
+ fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ if (!fb)
+ goto out;
+
+ hdisplay = crtc->mode.hdisplay;
+ vdisplay = crtc->mode.vdisplay;
+
+ if (crtc->invert_dimensions)
+ swap(hdisplay, vdisplay);
+
+ if (hdisplay > fb->width ||
+ vdisplay > fb->height ||
+ crtc->x > fb->width - hdisplay ||
+ crtc->y > fb->height - vdisplay) {
+ DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+ fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+ crtc->invert_dimensions ? " (inverted)" : "");
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ if (crtc->fb->pixel_format != fb->pixel_format) {
+ DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
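+ /*
+ * If the caller asked for a completion event, reserve space in the
+ * file's event queue and allocate the vblank event up front; the
+ * reserved space is handed back if the flip fails.
+ */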
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ ret = -ENOMEM;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (file->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+ file->event_space -= sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = page_flip->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy =
+ (void (*) (void *, size_t)) kfree;
+ }
+
+ old_fb = crtc->fb;
+ ret = crtc->funcs->page_flip(crtc, fb, e);
+ if (ret) {
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e, sizeof(*e));
+ }
+ /* Keep the old fb, don't unref it. */
+ old_fb = NULL;
+ } else {
+ /*
+ * Warn if the driver hasn't properly updated the crtc->fb
+ * field to reflect that the new framebuffer is now used.
+ * Failing to do so will screw with the reference counting
+ * on framebuffers.
+ */
+ WARN_ON(crtc->fb != fb);
+ /* Unref only the old framebuffer. */
+ fb = NULL;
+ }
+
+out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (old_fb)
+ drm_framebuffer_unreference(old_fb);
+ mutex_unlock(&crtc->mutex);
+
+ return ret;
+}
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->funcs->reset)
+ crtc->funcs->reset(crtc);
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->funcs->reset)
+ encoder->funcs->reset(encoder);
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_create_dumb_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_create_dumb *args = data;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ return dev->driver->dumb_create(file, dev, args);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_mmap_dumb_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file, dev, args->handle, &args->offset);
+}
+
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_mode_destroy_dumb_ioctl(DRM_IOCTL_ARGS)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file, dev, args->handle);
+}
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format\n");
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
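+
+/*
+ * Illustrative sketch, not part of the original change: the two subsampling
+ * helpers above are usually combined with drm_format_plane_cpp() to size a
+ * chroma plane. For DRM_FORMAT_YUV420 (hsub = 2, vsub = 2) a 1920x1080 frame
+ * has 960x540 chroma planes. The helper name example_plane_size() is
+ * hypothetical.
+ */
+static uint32_t example_plane_size(uint32_t format, int plane,
+ uint32_t width, uint32_t height)
+{
+ if (plane != 0) {
+ width /= drm_format_horz_chroma_subsampling(format);
+ height /= drm_format_vert_chroma_subsampling(format);
+ }
+
+ return width * height * drm_format_plane_cpp(format, plane);
+}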
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks themselves, no locking is possible
+ * here. That is not a problem, because this should happen single-threaded at
+ * init time; it is the driver's responsibility to ensure that guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&dev->mode_config.idr_mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&dev->mode_config.fb_lock, NULL, MUTEX_DRIVER, NULL);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ idr_init(&dev->mode_config.crtc_idr);
+
+ drm_modeset_lock_all(dev);
+ (void) drm_mode_create_standard_connector_properties(dev);
+ drm_modeset_unlock_all(dev);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+}
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+ struct drm_property_blob *blob, *bt;
+ struct drm_plane *plane, *plt;
+
+ list_for_each_entry_safe(encoder, enct, struct drm_encoder, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot, struct drm_connector,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, struct drm_property, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(blob, bt, struct drm_property_blob, &dev->mode_config.property_blob_list,
+ head) {
+ drm_property_destroy_blob(dev, blob);
+ }
+
+ /*
+ * Single-threaded teardown context, so it's not required to grab the
+ * fb_lock to protect against concurrent fb_list access. On the contrary,
+ * taking it here would deadlock with the drm_framebuffer_cleanup function.
+ *
+ * Also, if there are any framebuffers left, that's a driver leak now,
+ * so politely WARN about this.
+ */
+ WARN_ON(!list_empty(&dev->mode_config.fb_list));
+ list_for_each_entry_safe(fb, fbt, struct drm_framebuffer, &dev->mode_config.fb_list, head) {
+ drm_framebuffer_remove(fb);
+ }
+
+ list_for_each_entry_safe(plane, plt, struct drm_plane, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+ idr_remove_all(&dev->mode_config.crtc_idr);
+ idr_destroy(&dev->mode_config.crtc_idr);
+}
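+
+/*
+ * Illustrative sketch, not part of the original change: a KMS driver pairs
+ * drm_mode_config_init() at load time with drm_mode_config_cleanup() at
+ * unload, both called single-threaded. The function name and the size limits
+ * below are hypothetical placeholders; real drivers add their CRTC, encoder
+ * and connector setup in between.
+ */
+static int example_driver_modeset_init(struct drm_device *dev)
+{
+ drm_mode_config_init(dev);
+
+ /*
+ * Driver-specific CRTC/encoder/connector registration would go here;
+ * on any failure the driver must call drm_mode_config_cleanup(dev)
+ * before returning the error.
+ */
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+
+ return 0;
+}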
diff --git a/usr/src/uts/common/io/drm/drm_crtc_helper.c b/usr/src/uts/common/io/drm/drm_crtc_helper.c
new file mode 100644
index 0000000..50eb0be
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_crtc_helper.c
@@ -0,0 +1,1084 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "drm_edid.h"
+
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front of
+ * the connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+ struct list_head *tmp2;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp, struct drm_connector,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list, (caddr_t)connector);
+ }
+ tmp2 = dev->mode_config.connector_list.next;
+ list_splice(&panel_list, &dev->mode_config.connector_list, tmp2);
+}
+
+static bool drm_kms_helper_poll = true;
+
+static void drm_mode_validate_flag(struct drm_connector *connector,
+ int flags)
+{
+ struct drm_display_mode *mode;
+
+ if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+ return;
+
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ !(flags & DRM_MODE_FLAG_INTERLACE))
+ mode->status = MODE_NO_INTERLACE;
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+ !(flags & DRM_MODE_FLAG_DBLSCAN))
+ mode->status = MODE_NO_DBLESCAN;
+ }
+
+ return;
+}
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Based on the helper callbacks implemented by @connector, try to detect all
+ * valid modes. Modes will first be added to the connector's probed_modes
+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
+ * put into the normal modes list.
+ *
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ * FIXME: take into account monitor limits
+ *
+ * RETURNS:
+ * Number of modes found on @connector.
+ */
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ int count = 0;
+ int mode_flags = 0;
+ bool verbose_prune = true;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ drm_get_connector_name(connector));
+ /* set all modes to the unverified state */
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head)
+ mode->status = MODE_UNVERIFIED;
+
+ if (connector->force) {
+ if (connector->force == DRM_FORCE_ON)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+ if (connector->funcs->force)
+ connector->funcs->force(connector);
+ } else {
+ connector->status = connector->funcs->detect(connector, true);
+ }
+ /* Re-enable polling in case the global poll config changed. */
+ if (drm_kms_helper_poll != dev->mode_config.poll_running)
+ drm_kms_helper_poll_enable(dev);
+
+ dev->mode_config.poll_running = drm_kms_helper_poll;
+
+ if (connector->status == connector_status_disconnected) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, drm_get_connector_name(connector));
+ (void) drm_mode_connector_update_edid_property(connector, NULL);
+ verbose_prune = false;
+ goto prune;
+ }
+
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+ count = drm_load_edid_firmware(connector);
+ if (count == 0)
+#endif
+ count = (*connector_funcs->get_modes)(connector);
+
+ if (count == 0 && connector->status == connector_status_connected)
+ count = drm_add_modes_noedid(connector, 1024, 768);
+ if (count == 0)
+ goto prune;
+
+ drm_mode_connector_list_update(connector);
+
+ if (maxX && maxY)
+ drm_mode_validate_size(dev, &connector->modes, maxX,
+ maxY, 0);
+
+ if (connector->interlace_allowed)
+ mode_flags |= DRM_MODE_FLAG_INTERLACE;
+ if (connector->doublescan_allowed)
+ mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ drm_mode_validate_flag(connector, mode_flags);
+
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ if (mode->status == MODE_OK)
+ mode->status = connector_funcs->mode_valid(connector,
+ mode);
+ }
+
+prune:
+ drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
+
+ if (list_empty(&connector->modes))
+ return 0;
+
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head)
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_sort(&connector->modes);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+ drm_get_connector_name(connector));
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ return count;
+}
+
+
+/**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoder's DRM device's mode_config and see if @encoder is in use.
+ *
+ * RETURNS:
+ * True if @encoder is attached to a connector, false otherwise.
+ */
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return true;
+ return false;
+}
+
+/**
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is currently in use (driven by an in-use encoder), false otherwise.
+ */
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+ /* FIXME: Locking around list access? */
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
+ return true;
+ return false;
+}
+static void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+ if (encoder_funcs->disable)
+ (*encoder_funcs->disable)(encoder);
+ else
+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+ if (connector->status == connector_status_disconnected)
+ connector->encoder = NULL;
+ }
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ if (!drm_helper_encoder_in_use(encoder)) {
+ drm_encoder_disable(encoder);
+ /* disconnect the encoder from its crtc */
+ encoder->crtc = NULL;
+ }
+ }
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled) {
+ if (crtc_funcs->disable)
+ (*crtc_funcs->disable)(crtc);
+ else
+ (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+ crtc->fb = NULL;
+ }
+ }
+}
+
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct drm_crtc *tmp;
+ int crtc_mask = 1;
+
+ if (crtc == NULL)
+ DRM_ERROR("checking null crtc?\n");
+
+ dev = crtc->dev;
+
+ list_for_each_entry(tmp, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp == crtc)
+ break;
+ crtc_mask <<= 1;
+ }
+
+ if (encoder->possible_crtcs & crtc_mask)
+ return true;
+ return false;
+}
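+
+/*
+ * Illustrative sketch, not part of the original change: possible_crtcs is a
+ * bitmask indexed by the CRTC's position in the mode_config crtc_list, so the
+ * third CRTC corresponds to bit 2 (mask 0x4). The helper name
+ * example_crtc_index_to_mask() is hypothetical.
+ */
+static int example_crtc_index_to_mask(int crtc_index)
+{
+ return 1 << crtc_index;
+}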
+
+/*
+ * Check the CRTC we're going to map each output to vs. its current
+ * CRTC. If they don't match, we have to disable the output and the CRTC
+ * since the driver will have to re-route things.
+ */
+static void
+drm_crtc_prepare_encoders(struct drm_device *dev)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ encoder_funcs = encoder->helper_private;
+ /* Disable unused encoders */
+ if (encoder->crtc == NULL)
+ drm_encoder_disable(encoder);
+ /* Disable encoders whose CRTC is about to change */
+ if (encoder_funcs->get_crtc &&
+ encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
+ drm_encoder_disable(encoder);
+ }
+}
+
+/**
+ * drm_crtc_helper_set_mode - internal helper to set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: horizontal offset into the framebuffer
+ * @y: vertical offset into the framebuffer
+ * @old_fb: old framebuffer, for cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example, for changing whether audio is enabled on an HDMI link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ int saved_x, saved_y;
+ struct drm_encoder *encoder;
+ bool ret = true;
+
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled)
+ return true;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+ if (!adjusted_mode)
+ return false;
+
+ saved_hwmode = crtc->hwmode;
+ saved_mode = crtc->mode;
+ saved_x = crtc->x;
+ saved_y = crtc->y;
+
+ /* Update crtc values up front so the driver can rely on them for mode
+ * setting.
+ */
+ crtc->mode = *mode;
+ crtc->x = x;
+ crtc->y = y;
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+ adjusted_mode))) {
+ DRM_DEBUG_KMS("Encoder fixup failed\n");
+ goto done;
+ }
+ }
+
+ if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+ DRM_DEBUG_KMS("CRTC fixup failed\n");
+ goto done;
+ }
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+ /* Prepare the encoders and CRTCs before setting the mode. */
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ /* Disable the encoders as the first thing we do. */
+ encoder_funcs->prepare(encoder);
+ }
+
+ drm_crtc_prepare_encoders(dev);
+
+ crtc_funcs->prepare(crtc);
+
+ /* Set up the DPLL and any encoder state that needs to adjust or depend
+ * on the DPLL.
+ */
+ ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+ if (!ret)
+ goto done;
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ crtc_funcs->commit(crtc);
+
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->commit(encoder);
+
+ }
+
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+
+ /* FIXME: add subpixel order */
+done:
+ drm_mode_destroy(dev, adjusted_mode);
+ if (!ret) {
+ crtc->hwmode = saved_hwmode;
+ crtc->mode = saved_mode;
+ crtc->x = saved_x;
+ crtc->y = saved_y;
+ }
+
+ return ret;
+}
+
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
+/**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Set up a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
+ *
+ * RETURNS:
+ * Zero on success, or a negative error code on failure.
+ */
+int drm_crtc_helper_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+ struct drm_encoder *save_encoders, *new_encoder, *encoder;
+ struct drm_framebuffer *old_fb = NULL;
+ bool mode_changed = false; /* if true do a full mode set */
+ bool fb_changed = false; /* if true and !mode_changed just do a flip */
+ struct drm_connector *save_connectors, *connector;
+ int count = 0, ro, fail = 0;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ struct drm_mode_set save_set;
+ int ret;
+ int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ BUG_ON(!set);
+ BUG_ON(!set->crtc);
+ BUG_ON(!set->crtc->helper_private);
+
+ /* Enforce sane interface api - has been abused by the fb helper. */
+ BUG_ON(!set->mode && set->fb);
+ BUG_ON(set->fb && set->num_connectors == 0);
+
+ crtc_funcs = set->crtc->helper_private;
+
+ if (!set->mode)
+ set->fb = NULL;
+
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+ return drm_crtc_helper_disable(set->crtc);
+ }
+
+ dev = set->crtc->dev;
+
+ /* Allocate space for the backup of all (non-pointer) crtc, encoder and
+ * connector data. */
+ save_crtcs = kzalloc(dev->mode_config.num_crtc *
+ sizeof(struct drm_crtc), GFP_KERNEL);
+ if (!save_crtcs)
+ return -ENOMEM;
+
+ save_encoders = kzalloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_encoder), GFP_KERNEL);
+ if (!save_encoders) {
+ kfree(save_crtcs, dev->mode_config.num_crtc * sizeof(struct drm_crtc));
+ return -ENOMEM;
+ }
+
+ save_connectors = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_connector), GFP_KERNEL);
+ if (!save_connectors) {
+ kfree(save_crtcs, dev->mode_config.num_crtc * sizeof(struct drm_crtc));
+ kfree(save_encoders, dev->mode_config.num_encoder * sizeof(struct drm_encoder));
+ return -ENOMEM;
+ }
+
+ /* Copy data. Note that driver private data is not affected.
+ * Should anything bad happen, only the expected state is
+ * restored, not the driver's private bookkeeping.
+ */
+ count = 0;
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ save_crtcs[count++] = *crtc;
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ save_encoders[count++] = *encoder;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ save_connectors[count++] = *connector;
+ }
+
+ save_set.crtc = set->crtc;
+ save_set.mode = &set->crtc->mode;
+ save_set.x = set->crtc->x;
+ save_set.y = set->crtc->y;
+ save_set.fb = set->crtc->fb;
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+ /* If we have no fb then treat it as a full mode set */
+ if (set->crtc->fb == NULL) {
+ DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+ mode_changed = true;
+ } else if (set->fb == NULL) {
+ mode_changed = true;
+ } else if (set->fb->pixel_format !=
+ set->crtc->fb->pixel_format) {
+ mode_changed = true;
+ } else
+ fb_changed = true;
+ }
+
+ if (set->x != set->crtc->x || set->y != set->crtc->y)
+ fb_changed = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG_KMS("modes are different, full mode set\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ mode_changed = true;
+ }
+
+ /* a) traverse passed in connector list and get encoders for them */
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ new_encoder = connector->encoder;
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector) {
+ new_encoder = connector_funcs->best_encoder(connector);
+ /* if we can't get an encoder for a connector
+ we are setting now - then fail */
+ if (new_encoder == NULL)
+ /* don't break so the fail path works correctly */
+ fail = 1;
+ break;
+/*
+ if (connector->dpms != DRM_MODE_DPMS_ON) {
+ DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+ mode_changed = true;
+ }
+*/
+ }
+ }
+
+ if (new_encoder != connector->encoder) {
+ DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+ mode_changed = true;
+ /* If the encoder is reused for another connector, then
+ * the appropriate crtc will be set later.
+ */
+ if (connector->encoder)
+ connector->encoder->crtc = NULL;
+ connector->encoder = new_encoder;
+ }
+ }
+
+ if (fail) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+
+ if (connector->encoder->crtc == set->crtc)
+ new_crtc = NULL;
+ else
+ new_crtc = connector->encoder->crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector)
+ new_crtc = set->crtc;
+ }
+
+ /* Make sure the new CRTC will work with the encoder */
+ if (new_crtc &&
+ !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (new_crtc != connector->encoder->crtc) {
+ DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+ mode_changed = true;
+ connector->encoder->crtc = new_crtc;
+ }
+ if (new_crtc) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ new_crtc->base.id);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, drm_get_connector_name(connector));
+ }
+ }
+
+ /* mode_set_base is not a required function */
+ if (fb_changed && !crtc_funcs->mode_set_base)
+ mode_changed = true;
+
+ if (mode_changed) {
+ set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+ if (set->crtc->enabled) {
+ DRM_DEBUG_KMS("attempting to set mode from"
+ " userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
+ if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+ set->x, set->y,
+ old_fb)) {
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
+ set->crtc->fb = old_fb;
+ ret = -EINVAL;
+ goto fail;
+ }
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+ }
+ }
+ drm_helper_disable_unused_functions(dev);
+ } else if (fb_changed) {
+ set->crtc->x = set->x;
+ set->crtc->y = set->y;
+
+ old_fb = set->crtc->fb;
+ if (set->crtc->fb != set->fb)
+ set->crtc->fb = set->fb;
+ ret = crtc_funcs->mode_set_base(set->crtc,
+ set->x, set->y, old_fb);
+ if (ret != 0) {
+ set->crtc->fb = old_fb;
+ goto fail;
+ }
+ }
+
+ kfree(save_connectors, dev->mode_config.num_connector * sizeof(struct drm_connector));
+ kfree(save_encoders, dev->mode_config.num_encoder * sizeof(struct drm_encoder));
+ kfree(save_crtcs, dev->mode_config.num_crtc * sizeof(struct drm_crtc));
+ return 0;
+
+fail:
+ /* Restore all previous data. */
+ count = 0;
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ *crtc = save_crtcs[count++];
+ }
+
+ count = 0;
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+ *encoder = save_encoders[count++];
+ }
+
+ count = 0;
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ *connector = save_connectors[count++];
+ }
+
+ /* Try to restore the config */
+ if (mode_changed &&
+ !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+ save_set.y, save_set.fb))
+ DRM_ERROR("failed to restore config after modeset failure\n");
+
+ kfree(save_connectors, dev->mode_config.num_connector * sizeof(struct drm_connector));
+ kfree(save_encoders, dev->mode_config.num_encoder * sizeof(struct drm_encoder));
+ kfree(save_crtcs, dev->mode_config.num_crtc * sizeof(struct drm_crtc));
+ return ret;
+}
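+
+/*
+ * Illustrative sketch, not part of the original change: callers (for example
+ * the fbdev emulation) fill in a struct drm_mode_set and hand it to
+ * drm_crtc_helper_set_config(). The function name and the single-connector
+ * assumption below are hypothetical.
+ */
+static int example_set_single_connector_config(struct drm_crtc *crtc,
+ struct drm_connector *connector, struct drm_display_mode *mode,
+ struct drm_framebuffer *fb)
+{
+ struct drm_mode_set set;
+
+ (void) memset(&set, 0, sizeof (set));
+ set.crtc = crtc;
+ set.fb = fb;
+ set.mode = mode;
+ set.x = 0;
+ set.y = 0;
+ set.connectors = &connector;
+ set.num_connectors = 1;
+
+ /* the caller must hold the mode config lock, see the LOCKING note above */
+ return drm_crtc_helper_set_config(&set);
+}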
+
+
+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+ int dpms = DRM_MODE_DPMS_OFF;
+ struct drm_connector *connector;
+ struct drm_device *dev = crtc->dev;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder && connector->encoder->crtc == crtc)
+ if (connector->dpms < dpms)
+ dpms = connector->dpms;
+ return dpms;
+}
+
+/**
+ * drm_helper_connector_dpms
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
+ */
+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ int old_dpms;
+
+ if (mode == connector->dpms)
+ return;
+
+ old_dpms = connector->dpms;
+ connector->dpms = mode;
+
+ /* from off to on, do crtc then encoder */
+ if (mode < old_dpms) {
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ }
+
+ /* from on to off, do encoder then crtc */
+ if (mode > old_dpms) {
+ if (encoder) {
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+ if (crtc) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+
+ return;
+}
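+
+/*
+ * Illustrative sketch, not part of the original change: drivers that use the
+ * crtc helpers typically plug drm_helper_connector_dpms() straight into their
+ * connector funcs so the DPMS property is handled generically, and use
+ * drm_helper_probe_single_connector_modes() as the generic fill_modes
+ * implementation. The structure name is hypothetical and the usual
+ * drm_connector_funcs layout from drm_crtc.h is assumed.
+ */
+static const struct drm_connector_funcs example_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ /* .detect, .destroy and the remaining callbacks stay driver-specific */
+};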
+
+
+int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int i;
+
+ fb->width = mode_cmd->width;
+ fb->height = mode_cmd->height;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
+
+ return 0;
+}
+
+void drm_helper_resume_force_mode(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ int ret;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+
+ if (!crtc->enabled)
+ continue;
+
+ ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+
+ if (ret == false)
+ DRM_ERROR("failed to set mode on crtc %p\n", (void *)crtc);
+
+ /* Turn off outputs that were already powered off */
+ if (drm_helper_choose_crtc_dpms(crtc)) {
+ list_for_each_entry(encoder, struct drm_encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+ }
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
+ /* disable the unused connectors while restoring the modesetting */
+ drm_helper_disable_unused_functions(dev);
+}
+
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+}
+#define DRM_OUTPUT_POLL_PERIOD (10*DRM_HZ)
+static void
+output_poll_execute(struct work_struct *work)
+{
+ struct drm_device *dev = container_of(work, struct drm_device,
+ output_poll_work);
+ struct drm_connector *connector;
+ enum drm_connector_status old_status;
+ bool repoll = false, changed = false;
+
+ if (!drm_kms_helper_poll)
+ return;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+
+ /* Ignore forced connectors. */
+ if (connector->force)
+ continue;
+
+ /* Ignore HPD capable connectors and connectors where we don't
+ * want any hotplug detection at all for polling. */
+ if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ repoll = true;
+
+ old_status = connector->status;
+ /* if we are connected and don't want to poll for disconnect,
+ skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+ if (old_status != connector->status) {
+ const char *old, *new;
+
+ old = drm_get_connector_status_name(old_status);
+ new = drm_get_connector_status_name(connector->status);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+ "status updated from %s to %s\n",
+ connector->base.id,
+ drm_get_connector_name(connector),
+ old, new);
+
+ changed = true;
+ }
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+
+ if (repoll)
+ test_set_timer(&dev->output_poll_timer, DRM_OUTPUT_POLL_PERIOD);
+}
+
+void
+output_poll_execute_timer(void *device)
+{
+ struct drm_device *dev = (struct drm_device *)device;
+ (void) queue_work(dev->drm_wq, &dev->output_poll_work);
+}
+
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ del_timer_sync(&dev->output_poll_timer);
+}
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ return;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled)
+ poll = true;
+ }
+
+ if (poll)
+ test_set_timer(&dev->output_poll_timer, DRM_OUTPUT_POLL_PERIOD);
+}
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ INIT_WORK(&dev->output_poll_work, output_poll_execute);
+ setup_timer(&dev->output_poll_timer, output_poll_execute_timer,
+ (void *)dev);
+
+ dev->mode_config.poll_enabled = true;
+
+ drm_kms_helper_poll_enable(dev);
+}
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+}
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ /* kill the timer and schedule immediate execution; this doesn't block */
+ del_timer_sync(&dev->output_poll_timer);
+ cancel_delayed_work(dev->drm_wq);
+ if (drm_kms_helper_poll)
+ test_set_timer(&dev->output_poll_timer, 0);
+}
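+
+/*
+ * Illustrative sketch, not part of the original change: a driver calls
+ * drm_kms_helper_poll_init() once its connectors are registered and
+ * drm_kms_helper_poll_fini() on teardown, and forwards hotplug notifications
+ * through drm_helper_hpd_irq_event() from its hotplug handling path. The
+ * example_* function names are hypothetical.
+ */
+static void example_output_setup_done(struct drm_device *dev)
+{
+ drm_kms_helper_poll_init(dev);
+}
+
+static void example_hotplug_notify(struct drm_device *dev)
+{
+ drm_helper_hpd_irq_event(dev);
+}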
+
diff --git a/usr/src/uts/common/io/drm/drm_dma.c b/usr/src/uts/common/io/drm/drm_dma.c
index 589c486..54d7981 100644
--- a/usr/src/uts/common/io/drm/drm_dma.c
+++ b/usr/src/uts/common/io/drm/drm_dma.c
@@ -1,8 +1,22 @@
/*
- * drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*/
+
/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -25,109 +39,121 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
*/
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
-int
-drm_dma_setup(drm_device_t *dev)
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(struct drm_device *dev)
{
int i;
- drm_buf_entry_t *pbuf;
- dev->dma = drm_calloc(1, sizeof (*dev->dma), DRM_MEM_DMA);
- if (dev->dma == NULL)
- return (ENOMEM);
+ dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+ if (!dev->dma)
+ return -ENOMEM;
+
+ (void) memset(dev->dma, 0, sizeof(*dev->dma));
+
+ for (i = 0; i <= DRM_MAX_ORDER; i++)
+ (void) memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
mutex_init(&dev->dma_lock, NULL, MUTEX_DRIVER, NULL);
- pbuf = &(dev->dma->bufs[0]);
- for (i = 0; i <= DRM_MAX_ORDER; i++, pbuf++)
- bzero(pbuf, sizeof (drm_buf_entry_t));
- return (0);
+ return 0;
}
-void
-drm_dma_takedown(drm_device_t *dev)
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(struct drm_device *dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i, j;
- if (dma == NULL)
+ if (!dma)
return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
- drm_free(dma->bufs[i].seglist,
- dma->bufs[i].seg_count *
- sizeof (*dma->bufs[0].seglist), DRM_MEM_SEGS);
+ DRM_DEBUG("order %d: buf_count = %d,"
+ " seg_count = %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].seg_count);
+ kfree(dma->bufs[i].seglist,
+ dma->bufs[i].seg_count * sizeof (*dma->bufs[0].seglist));
}
-
- for (j = 0; j < dma->bufs[i].buf_count; j++) {
- if (dma->bufs[i].buflist[j].dev_private) {
- drm_free(dma->bufs[i].buflist[j].dev_private,
- dma->bufs[i].buflist[j].dev_priv_size,
- DRM_MEM_BUFS);
+ if (dma->bufs[i].buf_count) {
+ for (j = 0; j < dma->bufs[i].buf_count; j++) {
+ if (dma->bufs[i].buflist[j].dev_private) {
+ kfree(dma->bufs[i].buflist[j].dev_private,
+ dma->bufs[i].buflist[j].dev_priv_size);
+ }
}
+ kfree(dma->bufs[i].buflist,
+ dma->bufs[i].buf_count * sizeof(*dma->bufs[0].buflist));
}
- if (dma->bufs[i].buf_count)
- drm_free(dma->bufs[i].buflist,
- dma->bufs[i].buf_count *
- sizeof (*dma->bufs[0].buflist), DRM_MEM_BUFS);
- }
- if (dma->buflist) {
- drm_free(dma->buflist,
- dma->buf_count *sizeof (*dma->buflist),
- DRM_MEM_BUFS);
- }
-
- if (dma->pagelist) {
- drm_free(dma->pagelist,
- dma->page_count *sizeof (*dma->pagelist),
- DRM_MEM_PAGES);
}
- drm_free(dev->dma, sizeof (*dev->dma), DRM_MEM_DRIVER);
+ if (dma->buflist)
+ kfree(dma->buflist, dma->buf_count * sizeof(*dma->buflist));
+ if (dma->pagelist)
+ kfree(dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+ kfree(dev->dma, sizeof(*dev->dma));
dev->dma = NULL;
+
mutex_destroy(&dev->dma_lock);
}
-
-/*ARGSUSED*/
-void
-drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+/* LINTED */
+void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
{
if (!buf)
return;
- buf->pending = 0;
- buf->filp = NULL;
- buf->used = 0;
+ buf->pending = 0;
+ buf->file_priv = NULL;
+ buf->used = 0;
}
-void
-drm_reclaim_buffers(drm_device_t *dev, drm_file_t *fpriv)
+/**
+ * Reclaim the buffers.
+ *
+ * \param dev DRM device.
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->filp == fpriv) {
+ if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
@@ -143,15 +169,3 @@ drm_reclaim_buffers(drm_device_t *dev, drm_file_t *fpriv)
}
}
-/* Call into the driver-specific DMA handler */
-int
-drm_dma(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
-
- if (dev->driver->dma_ioctl) {
- return (dev->driver->dma_ioctl(dev, data, fpriv, mode));
- } else {
- return (EINVAL);
- }
-}
diff --git a/usr/src/uts/common/io/drm/drm_dp_helper.c b/usr/src/uts/common/io/drm/drm_dp_helper.c
new file mode 100644
index 0000000..9cb2fc4
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_dp_helper.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include "drm_dp_helper.h"
+#include "drmP.h"
+
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(100);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+ if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ udelay(400);
+ else
+ mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
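+
+/*
+ * Illustrative sketch, not part of the original change: during clock-recovery
+ * training a driver writes its drive settings, waits via
+ * drm_dp_link_train_clock_recovery_delay(), reads the lane status registers
+ * into link_status[], and then uses the helpers above to decide whether to
+ * retry with the sink's requested voltage swing / pre-emphasis. The function
+ * name and the caller-provided train_set[] array are hypothetical; the actual
+ * AUX reads and writes remain driver-specific.
+ */
+static bool example_clock_recovery_check(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count, u8 *train_set)
+{
+ int lane;
+
+ if (drm_dp_clock_recovery_ok(link_status, lane_count))
+ return true;
+
+ /* fold the sink's adjustment requests into the next drive settings */
+ for (lane = 0; lane < lane_count; lane++)
+ train_set[lane] =
+ drm_dp_get_adjust_request_voltage(link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+ return false;
+}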
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+ switch (link_rate) {
+ case 162000:
+ default:
+ return DP_LINK_BW_1_62;
+ case 270000:
+ return DP_LINK_BW_2_7;
+ case 540000:
+ return DP_LINK_BW_5_4;
+ }
+}
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ return 162000;
+ case DP_LINK_BW_2_7:
+ return 270000;
+ case DP_LINK_BW_5_4:
+ return 540000;
+ }
+}
+
+int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
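+
+/*
+ * Illustrative sketch, not part of the original change: the bw-code helpers
+ * convert between the DPCD encoding and a link rate in kHz (for example
+ * DP_LINK_BW_2_7 <-> 270000). Combined with the sink's reported lane count a
+ * driver can estimate raw link bandwidth; the function name is hypothetical,
+ * the usual DP_MAX_LANE_COUNT definitions from drm_dp_helper.h are assumed,
+ * and 8b/10b coding overhead is ignored.
+ */
+static int example_max_link_bw_khz(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ int lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ return drm_dp_max_link_rate(dpcd) * lane_count;
+}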
diff --git a/usr/src/uts/common/io/drm/drm_dp_i2c_helper.c b/usr/src/uts/common/io/drm/drm_dp_i2c_helper.c
new file mode 100644
index 0000000..31350c1
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_dp_i2c_helper.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include "drm_dp_helper.h"
+#include "drmP.h"
+
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address, this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+static u32
+/* LINTED E_FUNC_ARG_UNUSED */
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return 0;
+}
+
+static struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+ /* error = i2c_add_adapter(adapter); */
+ return error;
+}
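+
+/*
+ * Illustrative sketch, not part of the original change: a driver embeds a
+ * struct i2c_algo_dp_aux_data whose aux_ch callback performs the actual AUX
+ * channel transfer, hangs it off adapter->algo_data and then registers the
+ * adapter with i2c_dp_aux_add_bus(). The example_* names and the stub
+ * transfer below are hypothetical.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+static int example_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ /* hardware-specific native AUX transaction would go here */
+ return 0;
+}
+
+static int example_register_dp_i2c(struct i2c_adapter *adapter,
+ struct i2c_algo_dp_aux_data *aux_data)
+{
+ aux_data->running = false;
+ aux_data->address = 0;
+ aux_data->aux_ch = example_aux_ch;
+ adapter->algo_data = aux_data;
+
+ return i2c_dp_aux_add_bus(adapter);
+}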
diff --git a/usr/src/uts/common/io/drm/drm_drawable.c b/usr/src/uts/common/io/drm/drm_drawable.c
deleted file mode 100644
index 3ccc443..0000000
--- a/usr/src/uts/common/io/drm/drm_drawable.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- */
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
-
-/*ARGSUSED*/
-int
-drm_adddraw(DRM_IOCTL_ARGS)
-{
- drm_draw_t draw;
-
- draw.handle = 0; /* NOOP */
- DRM_DEBUG("draw.handle = %d\n", draw.handle);
-
- DRM_COPYTO_WITH_RETURN((void *)data, &draw, sizeof (draw));
-
- return (0);
-}
-
-/*ARGSUSED*/
-int
-drm_rmdraw(DRM_IOCTL_ARGS)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-drm_drawable_info_t *
-drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
- return (NULL);
-}
-
-/*ARGSUSED*/
-int
-drm_update_draw(DRM_IOCTL_ARGS)
-{
- DRM_DEBUG("drm_update_draw\n");
- return (0);
-}
diff --git a/usr/src/uts/common/io/drm/drm_drv.c b/usr/src/uts/common/io/drm/drm_drv.c
index ae86db5..f992c16 100644
--- a/usr/src/uts/common/io/drm/drm_drv.c
+++ b/usr/src/uts/common/io/drm/drm_drv.c
@@ -1,11 +1,15 @@
/*
+ * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
* drm_drv.h -- Generic driver template -*- linux-c -*-
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*/
/*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -33,545 +37,640 @@
*
*/
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
#include "drm.h"
+#include "drmP.h"
+#include "drm_core.h"
+#include "drm_io32.h"
#include "drm_sarea.h"
-
-int drm_debug_flag = 1;
-
-#define DRIVER_IOCTL_COUNT 256
-drm_ioctl_desc_t drm_ioctls[DRIVER_IOCTL_COUNT] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] =
- {drm_version, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] =
- {drm_getunique, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] =
- {drm_getmagic, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] =
- {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] =
- {drm_getmap, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] =
- {drm_getclient, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] =
- {drm_getstats, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] =
- {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODESET_CTL)] =
- {drm_modeset_ctl, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GEM_CLOSE)] =
- {drm_gem_close_ioctl, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GEM_FLINK)] =
- {drm_gem_flink_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_GEM_OPEN)] =
- {drm_gem_open_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] =
- {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] =
- {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] =
- {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] =
- {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] =
- {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] =
- {drm_rmmap_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] =
- {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] =
- {drm_getsareactx, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] =
- {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] =
- {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] =
- {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] =
- {drm_getctx, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] =
- {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] =
- {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] =
- {drm_resctx, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] =
- {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] =
- {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] =
- {drm_lock, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] =
- {drm_unlock, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] =
- {drm_noop, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] =
- {drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] =
- {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] =
- {drm_infobufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] =
- {drm_mapbufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] =
- {drm_freebufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] =
- {drm_dma, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] =
- {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] =
- {drm_agp_acquire, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] =
- {drm_agp_release, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] =
- {drm_agp_enable, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] =
- {drm_agp_info, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] =
- {drm_agp_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] =
- {drm_agp_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] =
- {drm_agp_bind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] =
- {drm_agp_unbind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] =
- {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] =
- {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] =
- {drm_wait_vblank, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] =
- {drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+#include "drm_linux_list.h"
+
+static int drm_version(DRM_IOCTL_ARGS);
+
+/** Ioctl table */
+static struct drm_ioctl_desc drm_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED, copyin32_drm_version, copyout32_drm_version),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0, copyin32_drm_unique, copyout32_drm_unique),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED, copyin32_drm_map, copyout32_drm_map),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED, copyin32_drm_client, copyout32_drm_client),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED, NULL, copyout32_drm_stats),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_map, copyout32_drm_map),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH, copyin32_drm_map, copyout32_drm_map),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_ctx_priv_map, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH, copyin32_drm_ctx_priv_map, copyout32_drm_ctx_priv_map),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH, copyin32_drm_ctx_res, copyout32_drm_ctx_res),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_buf_desc, copyout32_drm_buf_desc),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH, copyin32_drm_buf_map, copyout32_drm_buf_map),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH, copyin32_drm_buf_free, NULL),
+ /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_scatter_gather, copyout32_drm_scatter_gather),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, copyin32_drm_scatter_gather, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED, copyin32_drm_wait_vblank, copyout32_drm_wait_vblank),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL)
};
-extern void idr_list_free(struct idr_list *head);
+#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-const char *
-drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist)
-{
- int i = 0;
- for (i = 0; idlist[i].vendor != 0; i++) {
- if ((idlist[i].vendor == vendor) &&
- (idlist[i].device == device)) {
- return (idlist[i].name);
- }
- }
- return ((char *)NULL);
-}
-
-static int
-drm_firstopen(drm_device_t *dev)
-{
- int i;
- int retval;
- drm_local_map_t *map;
-
- /* prebuild the SAREA */
- retval = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
- _DRM_CONTAINS_LOCK, &map);
- if (retval != 0) {
- DRM_ERROR("firstopen: failed to prebuild SAREA");
- return (retval);
- }
-
- if (dev->driver->use_agp) {
- DRM_DEBUG("drm_firstopen: use_agp=%d", dev->driver->use_agp);
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (dev->driver->require_agp && dev->agp == NULL) {
- DRM_ERROR("couldn't initialize AGP");
- return (EIO);
- }
- }
-
- if (dev->driver->firstopen)
- retval = dev->driver->firstopen(dev);
-
- if (retval != 0) {
- DRM_ERROR("drm_firstopen: driver-specific firstopen failed");
- return (retval);
- }
-
- dev->buf_use = 0;
-
- if (dev->driver->use_dma) {
- i = drm_dma_setup(dev);
- if (i != 0)
- return (i);
- }
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
- dev->types[1] = _DRM_STAT_OPENS;
- dev->types[2] = _DRM_STAT_CLOSES;
- dev->types[3] = _DRM_STAT_IOCTLS;
- dev->types[4] = _DRM_STAT_LOCKS;
- dev->types[5] = _DRM_STAT_UNLOCKS;
-
- for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
- *(&dev->counts[i]) = 0;
-
- for (i = 0; i < DRM_HASH_SIZE; i++) {
- dev->magiclist[i].head = NULL;
- dev->magiclist[i].tail = NULL;
- }
-
- dev->irq_enabled = 0;
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-
- return (0);
-}
-
-/* Free resources associated with the DRM on the last close. */
-static int
-drm_lastclose(drm_device_t *dev)
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
{
- drm_magic_entry_t *pt, *next;
- drm_local_map_t *map, *mapsave;
- int i;
-
- DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+ DRM_DEBUG("\n");
- if (dev->driver->lastclose != NULL)
+ if (dev->driver->lastclose)
dev->driver->lastclose(dev);
+ DRM_DEBUG("driver lastclose completed\n");
- if (dev->irq_enabled)
+ if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
(void) drm_irq_uninstall(dev);
- if (dev->unique) {
- drm_free(dev->unique, dev->unique_len + 1, DRM_MEM_DRIVER);
- dev->unique = NULL;
- dev->unique_len = 0;
- }
-
- /* Clear pid list */
- for (i = 0; i < DRM_HASH_SIZE; i++) {
- for (pt = dev->magiclist[i].head; pt; pt = next) {
- next = pt->next;
- drm_free(pt, sizeof (*pt), DRM_MEM_MAGIC);
- }
- dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
- }
+ mutex_lock(&dev->struct_mutex);
/* Clear AGP information */
- if (dev->agp) {
- drm_agp_mem_t *entry;
- drm_agp_mem_t *nexte;
+ if (drm_core_has_AGP(dev) && dev->agp &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_agp_mem *entry, *tempe;
- /*
- * Remove AGP resources, but leave dev->agp
- * intact until drm_cleanup is called.
- */
- for (entry = dev->agp->memory; entry; entry = nexte) {
- nexte = entry->next;
+ /* Remove AGP resources, but leave dev->agp
+ intact until drv_cleanup is called. */
+ list_for_each_entry_safe(entry, tempe, struct drm_agp_mem, &dev->agp->memory, head) {
if (entry->bound)
- (void) drm_agp_unbind_memory(
- (unsigned long)entry->handle, dev);
- (void) drm_agp_free_memory(entry->handle, dev);
- drm_free(entry, sizeof (*entry), DRM_MEM_AGPLISTS);
+ (void) drm_agp_unbind_memory(entry->handle, dev);
+ kfree(entry, sizeof (*entry));
}
- dev->agp->memory = NULL;
+ INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
- (void) drm_agp_do_release(dev);
+ (void) drm_agp_release(dev);
dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- drm_agp_fini(dev);
+ dev->agp->enabled = 0;
}
-
- if (dev->sg != NULL) {
- drm_sg_mem_t *entry;
- entry = dev->sg;
+ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_sg_cleanup(dev->sg);
dev->sg = NULL;
- drm_sg_cleanup(dev, entry);
}
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_dma_takedown(dev);
- /* Clean up maps that weren't set up by the driver. */
- TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
- if (!map->kernel_owned)
- drm_rmmap(dev, map);
- }
+ mutex_unlock(&dev->struct_mutex);
- drm_dma_takedown(dev);
- if (dev->lock.hw_lock) {
- dev->lock.hw_lock = NULL; /* SHM removed */
- dev->lock.filp = NULL;
+ DRM_DEBUG("lastclose completed\n");
+ return 0;
+}
- mutex_enter(&(dev->lock.lock_mutex));
- cv_broadcast(&(dev->lock.lock_cv));
- mutex_exit(&(dev->lock.lock_mutex));
+/**
+ * Module initialization. Called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported).
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes an array of drm_device structures, and attempts to
+ * initialize all available devices, using consecutive minors, registering the
+ * stubs and initializing the AGP device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_init(struct drm_device *dev, struct drm_driver *driver)
+{
+ dev_info_t *dip = dev->devinfo;
+ struct pci_dev *pdev = NULL;
+ struct dev_ops *devop;
+ int ret, i;
+
+ DRM_DEBUG("\n");
+
+ pdev = pci_dev_create(dev);
+ if (!pdev)
+ return -EFAULT;
+
+ for (i = 0; driver->id_table[i].vendor != 0; i++) {
+ if ((driver->id_table[i].vendor == pdev->vendor) &&
+ (driver->id_table[i].device == pdev->device)) {
+ ret = drm_get_dev(dev, pdev, driver, driver->id_table[i].driver_data);
+ if (ret) {
+ pci_dev_destroy(pdev);
+ return ret;
+ }
+
+ /*
+ * DRM drivers are required to use common cb_ops
+ */
+ devop = ddi_get_driver(dev->devinfo);
+ if (devop->devo_cb_ops != &drm_cb_ops) {
+ devop->devo_cb_ops = &drm_cb_ops;
+ }
+ /*
+			 * Initialize FMA support
+ */
+ dev->drm_fm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "fm-capable",
+ DDI_FM_EREPORT_CAPABLE | DDI_FM_DMACHK_CAPABLE |
+ DDI_FM_ACCCHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
+
+ drm_fm_init(dev);
+ return 0;
+ }
}
- return (0);
+ pci_dev_destroy(pdev);
+ return -ENODEV;
}
-static int
-drm_load(drm_device_t *dev)
+void drm_exit(struct drm_device *dev)
{
- int retcode;
-
- cv_init(&(dev->lock.lock_cv), NULL, CV_DRIVER, NULL);
- mutex_init(&(dev->lock.lock_mutex), NULL, MUTEX_DRIVER, NULL);
- mutex_init(&(dev->dev_lock), "drmdev", MUTEX_DRIVER, NULL);
- mutex_init(&dev->irq_lock, "drmirq", MUTEX_DRIVER,
- (void *)dev->intr_block);
- mutex_init(&dev->drw_lock, "drmdrw", MUTEX_DRIVER, NULL);
- mutex_init(&dev->tasklet_lock, "drmtsk", MUTEX_DRIVER, NULL);
-
- dev->irq = pci_get_irq(dev);
- dev->pci_vendor = pci_get_vendor(dev);
- dev->pci_device = pci_get_device(dev);
-
- TAILQ_INIT(&dev->maplist);
- TAILQ_INIT(&dev->minordevs);
- TAILQ_INIT(&dev->files);
- if (dev->driver->load != NULL) {
- retcode = dev->driver->load(dev, 0);
- if (retcode != 0) {
- DRM_ERROR("drm_load: failed\n");
- goto error;
- }
- }
+ drm_put_dev(dev);
+ pci_dev_destroy(dev->pdev);
+ drm_fm_fini(dev);
+}
- retcode = drm_ctxbitmap_init(dev);
- if (retcode != 0) {
- DRM_ERROR("drm_load: Cannot allocate memory for ctx bitmap");
- goto error;
- }
+int __init drm_core_init(void)
+{
+ INIT_LIST_HEAD(&drm_iomem_list);
- if (dev->driver->use_gem == 1) {
- retcode = drm_gem_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot initialize graphics execution "
- "manager (GEM)\n");
- goto error;
- }
- }
+ idr_init(&drm_minors_idr);
- if (drm_init_kstats(dev)) {
- DRM_ERROR("drm_attach => drm_load: init kstats error");
- retcode = EFAULT;
- goto error;
- }
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+ return 0;
+}
- DRM_INFO("!drm: Initialized %s %d.%d.%d %s ",
- dev->driver->driver_name,
- dev->driver->driver_major,
- dev->driver->driver_minor,
- dev->driver->driver_patchlevel,
- dev->driver->driver_date);
- return (0);
-
-error:
- DRM_LOCK();
- (void) drm_lastclose(dev);
- DRM_UNLOCK();
- cv_destroy(&(dev->lock.lock_cv));
- mutex_destroy(&(dev->lock.lock_mutex));
- mutex_destroy(&dev->irq_lock);
- mutex_destroy(&(dev->dev_lock));
- mutex_destroy(&dev->drw_lock);
- mutex_destroy(&dev->tasklet_lock);
-
- return (retcode);
+void __exit drm_core_exit(void)
+{
+ idr_remove_all(&drm_minors_idr);
+ idr_destroy(&drm_minors_idr);
}
-/* called when cleanup this module */
-static void
-drm_unload(drm_device_t *dev)
+/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
{
- drm_local_map_t *map;
+ int len;
+
+ /* don't overflow userbuf */
+ len = strlen(value);
+ if (len > *buf_len)
+ len = *buf_len;
+
+ /* let userspace know exact length of driver value (which could be
+ * larger than the userspace-supplied buffer) */
+ *buf_len = strlen(value);
+
+ /* finally, try filling in the userbuf */
+ if (len && buf)
+ if (copy_to_user(buf, value, len))
+ return -EFAULT;
+ return 0;
+}
- drm_vblank_cleanup(dev);
+/**
+ * Get version information
+ *
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+/* LINTED */
+static int drm_version(DRM_IOCTL_ARGS)
+{
+ struct drm_version *version = data;
+ int err;
+
+ version->version_major = dev->driver->major;
+ version->version_minor = dev->driver->minor;
+ version->version_patchlevel = dev->driver->patchlevel;
+ err = drm_copy_field(version->name, &version->name_len,
+ dev->driver->name);
+ if (!err)
+ err = drm_copy_field(version->date, &version->date_len,
+ dev->driver->date);
+ if (!err)
+ err = drm_copy_field(version->desc, &version->desc_len,
+ dev->driver->desc);
+
+ return err;
+}
- drm_ctxbitmap_cleanup(dev);
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+int drm_ioctl(dev_t dev_id, struct drm_file *file_priv,
+ int cmd, intptr_t arg, int mode, cred_t *credp)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_ioctl_desc *ioctl;
+ drm_ioctl_t *func;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int retcode = -EINVAL;
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+ ++file_priv->ioctl_count;
+
+ if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+ goto err_i1;
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ usize = asize = _IOC_SIZE(cmd);
+ cmd = ioctl->cmd;
+ }
+ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+ ioctl = &drm_ioctls[nr];
+ cmd = ioctl->cmd;
+ usize = asize = _IOC_SIZE(cmd);
+ } else
+ goto err_i1;
+
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+ /* is there a local override? */
+ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+ func = dev->driver->dma_ioctl;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
+ ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+ ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
+ (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+ retcode = -EACCES;
+ } else {
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ (void) memset(kdata + usize, 0, asize - usize);
+ }
- if (dev->driver->use_gem == 1) {
- idr_list_free(&dev->object_name_idr);
- mutex_destroy(&dev->object_name_lock);
+ if (cmd & IOC_IN) {
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32 && ioctl->copyin32) {
+ if (ioctl->copyin32((void*)kdata, (void*)arg)) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else {
+#endif
+ if (DRM_COPY_FROM_USER(kdata, (void __user *)arg,
+ _IOC_SIZE(cmd)) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ }
+ retcode = func(dev_id, dev, kdata, file_priv, mode, credp);
+
+ if (cmd & IOC_OUT) {
+#ifdef _MULTI_DATAMODEL
+ if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32 && ioctl->copyout32) {
+ if (ioctl->copyout32((void*)arg, (void*)kdata)) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else {
+#endif
+ if (DRM_COPY_TO_USER((void __user *)arg, kdata,
+ _IOC_SIZE(cmd)) != 0)
+ retcode = -EFAULT;
+#ifdef _MULTI_DATAMODEL
+ }
+#endif
+ }
}
- DRM_LOCK();
- (void) drm_lastclose(dev);
- DRM_UNLOCK();
+err_i1:
+ if (kdata && (kdata != stack_kdata))
+ kfree(kdata, asize);
+ atomic_dec(&dev->ioctl_count);
+ if (retcode)
+ DRM_DEBUG("ret = %d\n", -retcode);
+ return retcode;
+}
+
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+ struct drm_map_list *entry;
- while ((map = TAILQ_FIRST(&dev->maplist)) != NULL) {
- drm_rmmap(dev, map);
+ list_for_each_entry(entry, struct drm_map_list, &dev->maplist, head) {
+ if (entry->map && entry->map->type == _DRM_SHM &&
+ (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+ return entry->map;
+ }
}
+ return NULL;
+}
- if (dev->driver->unload != NULL)
- dev->driver->unload(dev);
+void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ kref_get(&obj->refcount);
+}
- drm_mem_uninit();
- cv_destroy(&dev->lock.lock_cv);
- mutex_destroy(&dev->lock.lock_mutex);
- mutex_destroy(&dev->irq_lock);
- mutex_destroy(&dev->dev_lock);
- mutex_destroy(&dev->drw_lock);
- mutex_destroy(&dev->tasklet_lock);
+void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj != NULL)
+ kref_put(&obj->refcount, drm_gem_object_free);
+}
- dev->gtt_total = 0;
- atomic_set(&dev->pin_memory, 0);
- DRM_ERROR("drm_unload");
+void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (obj != NULL) {
+ struct drm_device *dev = obj->dev;
+ mutex_lock(&dev->struct_mutex);
+ kref_put(&obj->refcount, drm_gem_object_free);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
+void
+drm_gem_object_handle_reference(struct drm_gem_object *obj)
+{
+ drm_gem_object_reference(obj);
+ atomic_inc(&obj->handle_count);
+}
-/*ARGSUSED*/
-int
-drm_open(drm_device_t *dev, drm_cminor_t *mp, int openflags,
- int otyp, cred_t *credp)
+void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
- int retcode;
+ if (obj == NULL)
+ return;
+
+ if (atomic_read(&obj->handle_count) == 0)
+ return;
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+ if (atomic_dec_and_test(&obj->handle_count))
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_unreference(obj);
+}
- retcode = drm_open_helper(dev, mp, openflags, otyp, credp);
+void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
- if (!retcode) {
- atomic_inc_32(&dev->counts[_DRM_STAT_OPENS]);
- DRM_LOCK();
- if (!dev->open_count ++)
- retcode = drm_firstopen(dev);
- DRM_UNLOCK();
- }
+ if (atomic_read(&obj->handle_count) == 0)
+ return;
- return (retcode);
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+
+ if (atomic_dec_and_test(&obj->handle_count))
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_unreference_unlocked(obj);
}
-/*ARGSUSED*/
+
+/*
+ * PCI bus specific error callback
+ */
+/* ARGSUSED */
int
-drm_close(drm_device_t *dev, int minor, int flag, int otyp,
- cred_t *credp)
+drm_fm_error_cb(
+ dev_info_t *dip,
+ ddi_fm_error_t *err,
+ const void *impl)
{
- drm_cminor_t *mp;
- drm_file_t *fpriv;
- int retcode = 0;
-
- DRM_LOCK();
- mp = drm_find_file_by_minor(dev, minor);
- if (!mp) {
- DRM_UNLOCK();
- DRM_ERROR("drm_close: can't find authenticator");
- return (EACCES);
- }
- fpriv = mp->fpriv;
- ASSERT(fpriv);
+ pci_ereport_post(dip, err, NULL);
- if (--fpriv->refs != 0)
- goto done;
+ return (err->fme_status);
+}
- if (dev->driver->preclose != NULL)
- dev->driver->preclose(dev, fpriv);
+void
+drm_fm_init(struct drm_device *dev)
+{
+ dev_info_t *dip = dev->devinfo;
+ if (dev->drm_fm_cap) {
+		/* we do not care about the iblk cookie */
+ ddi_fm_init(dip, &dev->drm_fm_cap, NULL);
- /*
- * Begin inline drm_release
- */
- DRM_DEBUG("drm_close :pid = %d , open_count = %d",
- DRM_CURRENTPID, dev->open_count);
-
- if (dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.filp == fpriv) {
- DRM_DEBUG("Process %d dead, freeing lock for context %d",
- DRM_CURRENTPID,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- if (dev->driver->reclaim_buffers_locked != NULL)
- dev->driver->reclaim_buffers_locked(dev, fpriv);
- (void) drm_lock_free(dev, &dev->lock.hw_lock->lock,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- } else if (dev->driver->reclaim_buffers_locked != NULL &&
- dev->lock.hw_lock != NULL) {
- DRM_ERROR("drm_close: "
- "retake lock not implemented yet");
- }
+ /*
+ * Initialize pci ereport capabilities if ereport capable
+ */
+ if (DDI_FM_EREPORT_CAP(dev->drm_fm_cap) ||
+ DDI_FM_ERRCB_CAP(dev->drm_fm_cap)) {
+ pci_ereport_setup(dip);
+ }
- if (dev->driver->use_dma) {
- drm_reclaim_buffers(dev, fpriv);
+ /*
+ * Register error callback if error callback capable
+ */
+ if (DDI_FM_ERRCB_CAP(dev->drm_fm_cap)) {
+ ddi_fm_handler_register(dip,
+ drm_fm_error_cb, (void*) dev);
+ }
}
+}
- if (dev->driver->use_gem == 1) {
- drm_gem_release(dev, fpriv);
- }
+void
+drm_fm_fini(struct drm_device *dev)
+{
+ dev_info_t *dip = dev->devinfo;
+ if (dev->drm_fm_cap) {
+ if (DDI_FM_EREPORT_CAP(dev->drm_fm_cap) ||
+ DDI_FM_ERRCB_CAP(dev->drm_fm_cap)) {
+ pci_ereport_teardown(dip);
+ }
+
+ if (DDI_FM_ERRCB_CAP(dev->drm_fm_cap)) {
+ ddi_fm_handler_unregister(dip);
+ }
- if (dev->driver->postclose != NULL) {
- dev->driver->postclose(dev, fpriv);
+ ddi_fm_fini(dip);
}
- TAILQ_REMOVE(&dev->files, fpriv, link);
- drm_free(fpriv, sizeof (*fpriv), DRM_MEM_FILES);
+}
-done:
- atomic_inc_32(&dev->counts[_DRM_STAT_CLOSES]);
+/*
+ * report a device error.
+ * detail - DDI_FM_DEVICE_*
+ */
+void
+drm_fm_ereport(
+ struct drm_device *dev,
+ char *detail)
+{
+ uint64_t ena;
+ char buf[FM_MAX_CLASS];
+ dev_info_t *dip = dev->devinfo;
- TAILQ_REMOVE(&dev->minordevs, mp, link);
- drm_free(mp, sizeof (*mp), DRM_MEM_FILES);
+ if (DDI_FM_EREPORT_CAP(dev->drm_fm_cap)) {
- if (--dev->open_count == 0) {
- retcode = drm_lastclose(dev);
- }
- DRM_UNLOCK();
+ (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE,
+ detail);
- return (retcode);
-}
+ ena = fm_ena_generate(0, FM_ENA_FMT1);
-int
-drm_attach(drm_device_t *dev)
-{
- return (drm_load(dev));
+ ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
+ FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
+ }
}
+/*
+ * Check for PIO access errors (e.g. register access).
+ */
int
-drm_detach(drm_device_t *dev)
+drm_check_acc_handle(
+ struct drm_device *dev,
+ ddi_acc_handle_t handle)
{
- drm_unload(dev);
- drm_fini_kstats(dev);
- return (DDI_SUCCESS);
-}
+ ddi_fm_error_t de;
+
+ if (!DDI_FM_ACC_ERR_CAP(dev->drm_fm_cap)) {
+
+ return (DDI_FM_OK);
-static int
-drm_get_businfo(drm_device_t *dev)
-{
- dev->irq = pci_get_irq(dev);
- if (dev->irq == -1) {
- DRM_ERROR("drm_get_businfo: get irq error");
- return (DDI_FAILURE);
- }
- /* XXX Fix domain number (alpha hoses) */
- dev->pci_domain = 0;
- if (pci_get_info(dev, &dev->pci_bus,
- &dev->pci_slot, &dev->pci_func) != DDI_SUCCESS) {
- DRM_ERROR("drm_get_businfo: get bus slot func error ");
- return (DDI_FAILURE);
}
- DRM_DEBUG("drm_get_businfo: pci bus: %d, pci slot :%d pci func %d",
- dev->pci_bus, dev->pci_slot, dev->pci_func);
- return (DDI_SUCCESS);
+
+ ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
+ ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
+
+ return (de.fme_status);
+
}
+/*
+ * Check for DMA access errors.
+ */
int
-drm_probe(drm_device_t *dev, drm_pci_id_list_t *idlist)
+drm_check_dma_handle(
+ struct drm_device *dev,
+ ddi_dma_handle_t handle)
{
- const char *s = NULL;
- int vendor, device;
-
- vendor = pci_get_vendor(dev);
- device = pci_get_device(dev);
-
- s = drm_find_description(vendor, device, idlist);
- if (s != NULL) {
- dev->desc = s;
- if (drm_get_businfo(dev) != DDI_SUCCESS) {
- DRM_ERROR("drm_probe: drm get bus info error");
- return (DDI_FAILURE);
- }
- return (DDI_SUCCESS);
+ ddi_fm_error_t de;
+
+ if (!DDI_FM_DMA_ERR_CAP(dev->drm_fm_cap)) {
+
+ return (DDI_FM_OK);
+
}
- return (DDI_FAILURE);
+
+ ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
+ ddi_fm_dma_err_clear(handle, DDI_FME_VERSION);
+
+ return (de.fme_status);
+
}
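
The dispatch code above pairs each core ioctl with optional ILP32 copyin/copyout shims, and drm_copy_field() always writes the true driver-string length back to userspace even when the supplied buffer is too small or NULL. That contract enables the usual two-pass version query; a minimal userspace sketch (assuming the standard <drm/drm.h> struct drm_version and DRM_IOCTL_VERSION; allocation-failure cleanup omitted):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>		/* assumed: struct drm_version, DRM_IOCTL_VERSION */

/* Two-pass DRM_IOCTL_VERSION query: the first call (NULL buffers) returns
 * only the string lengths, the second fills caller-allocated buffers.
 * The kernel copies raw bytes without a NUL, so terminate them here. */
int query_drm_version(int fd, struct drm_version *v)
{
	(void) memset(v, 0, sizeof (*v));
	if (ioctl(fd, DRM_IOCTL_VERSION, v) != 0)
		return (-1);

	v->name = malloc(v->name_len + 1);
	v->date = malloc(v->date_len + 1);
	v->desc = malloc(v->desc_len + 1);
	if (v->name == NULL || v->date == NULL || v->desc == NULL)
		return (-1);	/* leaks in a real caller; fine for a sketch */

	if (ioctl(fd, DRM_IOCTL_VERSION, v) != 0)
		return (-1);

	v->name[v->name_len] = '\0';
	v->date[v->date_len] = '\0';
	v->desc[v->desc_len] = '\0';
	return (0);
}
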
diff --git a/usr/src/uts/common/io/drm/drm_edid.c b/usr/src/uts/common/io/drm/drm_edid.c
new file mode 100644
index 0000000..15fe208
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_edid.c
@@ -0,0 +1,2995 @@
+/*
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "drm.h"
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_sun_i2c.h"
+#include "drm_crtc.h"
+
+#define version_greater(edid, maj, min) \
+ (((edid)->version > (maj)) || \
+ ((edid)->version == (maj) && (edid)->revision > (min)))
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+
+struct detailed_mode_closure {
+ struct drm_connector *connector;
+ struct edid *edid;
+ bool preferred;
+ u32 quirks;
+ int modes;
+};
+
+#define LEVEL_DMT 0
+#define LEVEL_GTF 1
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
+
+static struct edid_quirk {
+ char vendor[4];
+ int product_id;
+ u32 quirks;
+} edid_quirk_list[] = {
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+ { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Unknown Acer */
+ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Belinea 10 15 55 */
+ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+ { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Envision Peripherals, Inc. EN-7100e */
+ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ /* Envision EN2028 */
+ { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Funai Electronics PM36B */
+ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM },
+
+ /* LG Philips LCD LP154W01-A5 */
+ { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+ { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+ /* Philips 107p5 CRT */
+ { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Proview AY765C */
+ { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Samsung SyncMaster 205BW. Note: irony */
+ { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+ /* Samsung SyncMaster 22[5-6]BW */
+ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+};
+
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@75HZ */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@85HZ */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+};
+
+static const struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+struct minimode {
+ short w;
+ short h;
+ short r;
+ short rb;
+};
+
+static const struct minimode est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 1 - 640x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2 - 720x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 3 - 720x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 4 - 1280x720@60Hz */
+ { .vrefresh = 60, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 5 - 1920x1080i@60Hz */
+ { .vrefresh = 60, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 6 - 1440x480i@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 7 - 1440x480i@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 8 - 1440x240@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 9 - 1440x240@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 10 - 2880x480i@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 11 - 2880x480i@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 12 - 2880x240@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 13 - 2880x240@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 14 - 1440x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 15 - 1440x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 16 - 1920x1080@60Hz */
+ { .vrefresh = 60, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 17 - 720x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 18 - 720x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 19 - 1280x720@50Hz */
+ { .vrefresh = 50, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 20 - 1920x1080i@50Hz */
+ { .vrefresh = 50, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 21 - 1440x576i@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 22 - 1440x576i@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 23 - 1440x288@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 24 - 1440x288@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 25 - 2880x576i@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 26 - 2880x576i@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 27 - 2880x288@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 28 - 2880x288@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 29 - 1440x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 30 - 1440x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 31 - 1920x1080@50Hz */
+ { .vrefresh = 50, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 32 - 1920x1080@24Hz */
+ { .vrefresh = 50, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 33 - 1920x1080@25Hz */
+ { .vrefresh = 25, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 34 - 1920x1080@30Hz */
+ { .vrefresh = 30, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 35 - 2880x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 36 - 2880x480@60Hz */
+ { .vrefresh = 60, DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 37 - 2880x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 38 - 2880x576@50Hz */
+ { .vrefresh = 50, DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 39 - 1920x1080i@50Hz */
+ { .vrefresh = 50, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 40 - 1920x1080i@100Hz */
+ { .vrefresh = 100, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 41 - 1280x720@100Hz */
+ { .vrefresh = 100, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 42 - 720x576@100Hz */
+ { .vrefresh = 100, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 43 - 720x576@100Hz */
+ { .vrefresh = 100, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 44 - 1440x576i@100Hz */
+ { .vrefresh = 100, DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 45 - 1440x576i@100Hz */
+ { .vrefresh = 100, DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK)},
+ /* 46 - 1920x1080i@120Hz */
+ { .vrefresh = 120, DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE)},
+ /* 47 - 1280x720@120Hz */
+ { .vrefresh = 120, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 48 - 720x480@120Hz */
+ { .vrefresh = 120, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 49 - 720x480@120Hz */
+ { .vrefresh = 120, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 50 - 1440x480i@120Hz */
+ { .vrefresh = 120, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 51 - 1440x480i@120Hz */
+ { .vrefresh = 120, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 52 - 720x576@200Hz */
+ { .vrefresh = 200, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 53 - 720x576@200Hz */
+ { .vrefresh = 200, DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 54 - 1440x576i@200Hz */
+ { .vrefresh = 200, DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 55 - 1440x576i@200Hz */
+ { .vrefresh = 200, DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 56 - 720x480@240Hz */
+ { .vrefresh = 240, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 57 - 720x480@240Hz */
+ { .vrefresh = 240, DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ /* 58 - 1440x480i@240Hz */
+ { .vrefresh = 240, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 59 - 1440x480i@240Hz */
+ { .vrefresh = 240, DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK)},
+ /* 60 - 1280x720@24Hz */
+ { .vrefresh = 24, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 61 - 1280x720@25Hz */
+ { .vrefresh = 25, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 62 - 1280x720@30Hz */
+ { .vrefresh = 30, DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 63 - 1920x1080@120Hz */
+ { .vrefresh = 120, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ /* 64 - 1920x1080@100Hz */
+ { .vrefresh = 100, DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+};
+
+/*** DDC fetch and block validation ***/
+
+static const u8 edid_header[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/*
+ * Sanity check the header of the base EDID block. Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+ int i, score = 0;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ return score;
+}
+
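+/* Minimum number of matching EDID header bytes (0-8) needed before we repair the header */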
+static int edid_fixup = 6;
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
+ */
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
+{
+ int i;
+ u8 csum = 0;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (!raw_edid) {
+ WARN_ON(1);
+ return false;
+ }
+ if (edid_fixup > 8 || edid_fixup < 0)
+ edid_fixup = 6;
+
+ if (block == 0) {
+ int score = drm_edid_header_is_valid(raw_edid);
+ if (score == 8)
+ DRM_DEBUG("edid header is perfect");
+ else if (score >= edid_fixup) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ (void) memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
+
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+ if (csum) {
+ if (print_bad_edid) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ }
+
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
+ }
+
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+
+bad:
+ if (print_bad_edid) {
+ DRM_DEBUG_KMS("Raw EDID:\n");
+ /* print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); */
+ }
+ return false;
+}
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
+ return false;
+
+ return true;
+}
+
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adapter
+ * \param buf : EDID data buffer to be filled
+ * \param block : EDID block number to fetch
+ * \param len : number of bytes of the block to read
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ unsigned char segment = block >> 1;
+ unsigned char xfers = segment ? 3 : 2;
+ int ret, retries = 5;
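+ /*
+ * E-DDC addressing: each 256-byte segment holds two 128-byte EDID
+ * blocks, so blocks 0 and 1 live in segment 0 and need only two
+ * transfers, while e.g. block 2 is segment 1 at offset 0 (the u8
+ * "start" above wraps back to 0 for blocks in higher segments).
+ */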
+
+ /* The core i2c driver will automatically retry the transfer if the
+ * adapter reports EAGAIN. However, we find that bit-banging transfers
+ * are susceptible to errors under a heavily loaded machine and
+ * generate spurious NAKs and timeouts. Retrying the transfer
+ * of the individual block a few times seems to overcome this.
+ */
+ do {
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_SEGMENT_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &segment,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = (u16)len,
+ .buf = buf,
+ }
+ };
+
+ /*
+ * Skip the segment-pointer write when the segment is zero, so we
+ * don't upset non-compliant DDC monitors.
+ */
+ ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
+ if (ret == -ENXIO) {
+ DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
+ adapter->name);
+ break;
+ }
+ } while (ret != xfers && --retries);
+
+ return ret == xfers ? 0 : -1;
+}
+
+static bool drm_edid_is_zero(u8 *in_edid, int length)
+{
+ int i;
+ u32 *raw_edid = (u32 *)(uintptr_t)(caddr_t)in_edid;
+
+ for (i = 0; i < length / 4; i++)
+ if (*(raw_edid + i) != 0)
+ return false;
+ return true;
+}
+
+static struct edid *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ int i, j = 0;
+ u8 *block, valid_extensions = 0;
+ bool print_bad_edid = !connector->bad_edid_counter;
+
+ /* allocate enough for the base block and all possible extensions up front */
+ if ((block = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block, 0, print_bad_edid))
+ break;
+ if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+ connector->null_edid_counter++;
+ goto carp;
+ }
+ }
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return (struct edid *) block;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter,
+ block + (valid_extensions + 1) * EDID_LENGTH,
+ j, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
+ valid_extensions++;
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_ERROR("%s: Ignoring invalid EDID block %d.\n",
+ drm_get_connector_name(connector), j);
+ }
+
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ }
+
+ return (struct edid *) block;
+
+carp:
+ if (print_bad_edid) {
+ DRM_DEBUG_KMS("%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+ }
+ connector->bad_edid_counter++;
+
+out:
+ kfree(block, EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1));
+ return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c adapter to probe
+ * \return true if a sink responds on the DDC address
+ */
+bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = drm_do_get_edid(connector, adapter);
+
+ return edid;
+}
+
+/*** EDID parsing ***/
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+ char edid_vendor[3];
+
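+ /*
+ * The two mfg_id bytes pack three 5-bit letters ('A' = 1), e.g.
+ * the bytes 0x10 0xAC decode to "DEL".
+ */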
+ edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+ edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+ ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+ edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+ return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+ struct edid_quirk *quirk;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+
+ if (edid_vendor(edid, quirk->vendor) &&
+ (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ return quirk->quirks;
+ }
+
+ return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+ u32 quirks)
+{
+ struct drm_display_mode *t, *cur_mode, *preferred_mode;
+ int target_refresh = 0;
+
+ if (list_empty(&connector->probed_modes))
+ return;
+
+ if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+ target_refresh = 60;
+ if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+ target_refresh = 75;
+
+ preferred_mode = list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ list_for_each_entry_safe(cur_mode, t, struct drm_display_mode, &connector->probed_modes, head) {
+ cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+ if (cur_mode == preferred_mode)
+ continue;
+
+ /* Largest mode is preferred */
+ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+ preferred_mode = cur_mode;
+
+ /* At a given size, try to get closest to target refresh */
+ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+ MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+ MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+ preferred_mode = cur_mode;
+ }
+ }
+
+ preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
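+ /*
+ * CVT reduced blanking uses fixed values: a 160-pixel horizontal
+ * blank, a 48-pixel front porch plus a 32-pixel hsync, and a
+ * 3-line vertical front porch, which is exactly what we test here.
+ */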
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hsize != ptr->hdisplay)
+ continue;
+ if (vsize != ptr->vdisplay)
+ continue;
+ if (fresh != drm_mode_vrefresh(ptr))
+ continue;
+ if (rb != mode_is_rb(ptr))
+ continue;
+
+ return drm_mode_duplicate(dev, ptr);
+ }
+
+ return NULL;
+}
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ int i, n = 0;
+ u8 d = ext[0x02];
+ u8 *det_base = ext + d;
+
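+ /*
+ * ext[0x02] is the offset of the first 18-byte DTD within the
+ * 128-byte CEA block; the checksum byte at offset 127 caps how
+ * many DTDs can follow.
+ */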
+ n = (127 - d) / 18;
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ unsigned int i, n = min((int)ext[0x02], 6);
+ u8 *det_base = ext + 5;
+
+ if (ext[0x01] != 1)
+ return; /* unknown version */
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ for (i = 1; i <= raw_edid[0x7e]; i++) {
+ u8 *ext = raw_edid + (i * EDID_LENGTH);
+ switch (*ext) {
+ case CEA_EXT:
+ cea_for_each_detailed_block(ext, cb, closure);
+ break;
+ case VTB_EXT:
+ vtb_for_each_detailed_block(ext, cb, closure);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
+
+/*
+ * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
+ * monitors fill with ascii space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+ return (a == 0x00 && b == 0x00) ||
+ (a == 0x01 && b == 0x01) ||
+ (a == 0x20 && b == 0x20);
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector the new mode will be probed against
+ * @edid: EDID block being parsed
+ * @t: standard timing params
+ * @revision: EDID revision number of the block
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ */
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
+ int hsize, vsize;
+ int vrefresh_rate;
+ unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+ >> EDID_TIMING_ASPECT_SHIFT;
+ unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+ >> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
+
+ if (bad_std_timing(t->hsize, t->vfreq_aspect))
+ return NULL;
+
+ /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+ hsize = t->hsize * 8 + 248;
+ /* the EDID field stores the refresh rate minus 60 */
+ vrefresh_rate = vfreq + 60;
+ /* the vdisplay is calculated based on the aspect ratio */
+ if (aspect_ratio == 0) {
+ if (revision < 3)
+ vsize = hsize;
+ else
+ vsize = (hsize * 10) / 16;
+ } else if (aspect_ratio == 1)
+ vsize = (hsize * 3) / 4;
+ else if (aspect_ratio == 2)
+ vsize = (hsize * 4) / 5;
+ else
+ vsize = (hsize * 9) / 16;
+
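+ /*
+ * Example (1024x768@60, stored as 0x61 0x40): hsize 0x61 = 97, so
+ * hdisplay = 97 * 8 + 248 = 1024; the 4:3 aspect code gives
+ * vdisplay = 1024 * 3 / 4 = 768; vfreq 0 means 60Hz.
+ */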
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, struct drm_display_mode, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
+ false);
+ if (mode != NULL) {
+ mode->hdisplay = 1366;
+ mode->hsync_start = mode->hsync_start - 1;
+ mode->hsync_end = mode->hsync_end - 1;
+ }
+ return mode;
+ }
+
+ /* check whether it can be found in default mode table */
+ if (drm_monitor_supports_rb(edid)) {
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+ true);
+ if (mode)
+ return mode;
+ }
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
+ if (mode)
+ return mode;
+
+ /* okay, generate it */
+ switch (timing_level) {
+ case LEVEL_DMT:
+ break;
+ case LEVEL_GTF:
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (!mode)
+ return NULL;
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ drm_mode_destroy(dev, mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
+ case LEVEL_CVT:
+ mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+ false);
+ break;
+ }
+ return mode;
+}
+
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+ struct detailed_pixel_timing *pt)
+{
+ int i;
+ static const struct {
+ int w, h;
+ } cea_interlaced[] = {
+ { 1920, 1080 },
+ { 720, 480 },
+ { 1440, 480 },
+ { 2880, 480 },
+ { 720, 576 },
+ { 1440, 576 },
+ { 2880, 576 },
+ };
+
+ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
+ if ((mode->hdisplay == cea_interlaced[i].w) &&
+ (mode->vdisplay == cea_interlaced[i].h / 2)) {
+ mode->vdisplay *= 2;
+ mode->vsync_start *= 2;
+ mode->vsync_end *= 2;
+ mode->vtotal *= 2;
+ mode->vtotal |= 1;
+ }
+ }
+
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ struct edid *edid,
+ struct detailed_timing *timing,
+ u32 quirks)
+{
+ struct drm_display_mode *mode;
+ struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+ unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+ unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+ unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+ unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+ /* ignore tiny modes */
+ if (hactive < 64 || vactive < 64)
+ return NULL;
+
+ if (pt->misc & DRM_EDID_PT_STEREO) {
+ DRM_ERROR("stereo mode not supported\n");
+ return NULL;
+ }
+ if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+ DRM_ERROR("integrated sync not supported\n");
+ }
+
+ /* a detailed timing with a zero hsync or vsync pulse width is invalid */
+ if (!hsync_pulse_width || !vsync_pulse_width) {
+ DRM_DEBUG_KMS("Incorrect Detailed timing. "
+ "Wrong Hsync/Vsync pulse width\n");
+ return NULL;
+ }
+
+ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
+ goto set_size;
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ timing->pixel_clock = cpu_to_le16(1088);
+
+ mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
+
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hsync_offset;
+ mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+ mode->htotal = mode->hdisplay + hblank;
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vsync_offset;
+ mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+ /* Some EDIDs have bogus h/vtotal values */
+ if (mode->hsync_end > mode->htotal)
+ mode->htotal = mode->hsync_end + 1;
+ if (mode->vsync_end > mode->vtotal)
+ mode->vtotal = mode->vsync_end + 1;
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+ }
+
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+set_size:
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+ if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ mode->width_mm *= 10;
+ mode->height_mm *= 10;
+ }
+
+ if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ mode->width_mm = edid->width_cm * 10;
+ mode->height_mm = edid->height_cm * 10;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+static bool
+mode_in_hsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
+ hsync = drm_mode_hsync(mode);
+
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
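+ /* t[9] counts 10MHz units; bits 7:2 of t[12] subtract 0.25MHz steps */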
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
+ return false;
+
+ if (!mode_in_vsync_range(mode, edid, t))
+ return false;
+
+ if ((max_clock = range_pixel_clock(edid, t)))
+ if (mode->clock > max_clock)
+ return false;
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
+
+ return true;
+}
+
+static bool valid_inferred_mode(const struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+ bool ok = false;
+
+ list_for_each_entry(m, struct drm_display_mode, &connector->probed_modes, head) {
+ if (mode->hdisplay == m->hdisplay &&
+ mode->vdisplay == m->vdisplay &&
+ drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+ return false; /* duplicated */
+ if (mode->hdisplay <= m->hdisplay &&
+ mode->vdisplay <= m->vdisplay)
+ ok = true;
+ }
+ return ok;
+}
+
+static int
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ valid_inferred_mode(connector, drm_dmt_modes + i)) {
+ newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+/*
+ * Fix up a 1366x768 mode that was generated as 1368x768;
+ * GTF/CVT can't express a width of 1366, which isn't divisible by 8.
+ */
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
+}
+
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ bool rb = drm_monitor_supports_rb(edid);
+
+ for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static void
+do_inferred_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ return;
+
+ closure->modes += drm_dmt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+
+ if (!version_greater(closure->edid, 1, 1))
+ return; /* GTF not defined yet */
+
+ switch (range->flags) {
+ case 0x02: /* secondary gtf, XXX could do more */
+ case 0x00: /* default gtf */
+ closure->modes += drm_gtf_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x04: /* cvt, only in 1.4+ */
+ if (!version_greater(closure->edid, 1, 3))
+ break;
+
+ closure->modes += drm_cvt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x01: /* just the ranges, no formula */
+ default:
+ break;
+ }
+}
+
+static int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+ &closure);
+
+ return closure.modes;
+}
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j >= 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= ARRAY_SIZE(est3_modes))
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r,
+ est3_modes[m].rb);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+static void
+do_established_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_EST_TIMINGS)
+ closure->modes += drm_est3_modes(closure->connector, timing);
+}
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @connector: connector to add the modes to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above). Tease them out and add them to the global modes list.
+ */
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ unsigned long est_bits = edid->established_timings.t1 |
+ (edid->established_timings.t2 << 8) |
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+ if (est_bits & (1<<i)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid,
+ do_established_modes, &closure);
+
+ return modes + closure.modes;
+}
+
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct drm_connector *connector = closure->connector;
+ struct edid *edid = closure->edid;
+
+ if (data->type == EDID_DETAIL_STD_MODES) {
+ int i;
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
+ }
+ }
+ }
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector to add the modes to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
+ */
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+ &closure);
+
+ /* XXX should also look for standard codes in VTB blocks */
+
+ return modes + closure.modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
+
+ for (i = 0; i < 4; i++) {
+ int width, height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
+
+ if (!memcmp(cvt->code, empty, 3))
+ continue;
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
+ case 0x00:
+ width = height * 4 / 3;
+ break;
+ case 0x04:
+ width = height * 16 / 9;
+ break;
+ case 0x08:
+ width = height * 16 / 10;
+ break;
+ case 0x0c:
+ width = height * 15 / 9;
+ break;
+ }
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+
+ if (data->type == EDID_DETAIL_CVT_3BYTE)
+ closure->modes += drm_cvt_modes(closure->connector, timing);
+}
+
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ if (version_greater(edid, 1, 2))
+ drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
+
+ /* XXX should also look for CVT codes in VTB blocks */
+
+ return closure.modes;
+}
+
+static void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_mode *newmode;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
+
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = 0;
+ }
+}
+
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ */
+static int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ u32 quirks)
+{
+ struct detailed_mode_closure closure = {
+ connector,
+ edid,
+ 1,
+ quirks,
+ 0
+ };
+
+ if (closure.preferred && !version_greater(edid, 1, 3))
+ closure.preferred =
+ (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+ drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+ return closure.modes;
+}
+
+#define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK 0x01
+#define VIDEO_BLOCK 0x02
+#define VENDOR_BLOCK 0x03
+#define SPEAKER_BLOCK 0x04
+#define VIDEO_CAPABILITY_BLOCK 0x07
+#define EDID_BASIC_AUDIO (1 << 6)
+#define EDID_CEA_YCRCB444 (1 << 5)
+#define EDID_CEA_YCRCB422 (1 << 4)
+#define EDID_CEA_VCDB_QS (1 << 6)
+
+/**
+ * Search EDID for CEA extension block.
+ */
+u8 *drm_find_cea_extension(struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+/*
+ * Calculate the alternate clock for the CEA mode
+ * (60Hz vs. 59.94Hz etc.)
+ */
+static unsigned int
+cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
+{
+ unsigned int clock = cea_mode->clock;
+
+ if (cea_mode->vrefresh % 6 != 0)
+ return clock;
+
+ /*
+ * edid_cea_modes contains the 59.94Hz
+ * variant for 240 and 480 line modes,
+ * and the 60Hz variant otherwise.
+ */
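+ /*
+ * e.g. 148500kHz (1080p60) becomes DIV_ROUND_UP(148500 * 1000, 1001)
+ * = 148352kHz for the 59.94Hz variant, while 27000kHz (480i) scales
+ * up to 27000 * 1001 / 1000 = 27027kHz for the exact 60Hz variant.
+ */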
+ if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
+ clock = clock * 1001 / 1000;
+ else
+ clock = DIV_ROUND_UP(clock * 1000, 1001);
+
+ return clock;
+}
+
+/**
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
+ *
+ * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
+ */
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
+{
+ u8 mode;
+
+ if (!to_match->clock)
+ return 0;
+
+ for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
+ const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+ unsigned int clock1, clock2;
+
+ /* Check both 60Hz and 59.94Hz */
+ clock1 = cea_mode->clock;
+ clock2 = cea_mode_alternate_clock(cea_mode);
+
+ if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+ KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+ drm_mode_equal_no_clocks(to_match, cea_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+
+static int
+add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *tmp;
+ struct list_head list;
+ int modes = 0;
+
+ INIT_LIST_HEAD(&list);
+
+ /* Don't add CEA modes if the CEA extension block is missing */
+ if (!drm_find_cea_extension(edid))
+ return 0;
+
+ /*
+ * Go through all probed modes and create a new mode
+ * with the alternate clock for certain CEA modes.
+ */
+ list_for_each_entry(mode, struct drm_display_mode, &connector->probed_modes, head) {
+ const struct drm_display_mode *cea_mode;
+ struct drm_display_mode *newmode;
+ u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
+ unsigned int clock1, clock2;
+
+ if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
+ continue;
+
+ cea_mode = &edid_cea_modes[cea_mode_idx];
+
+ clock1 = cea_mode->clock;
+ clock2 = cea_mode_alternate_clock(cea_mode);
+
+ if (clock1 == clock2)
+ continue;
+
+ if (mode->clock != clock1 && mode->clock != clock2)
+ continue;
+
+ newmode = drm_mode_duplicate(dev, cea_mode);
+ if (!newmode)
+ continue;
+
+ /*
+ * The current mode could be either variant. Make
+ * sure to pick the "other" clock for the new mode.
+ */
+ if (mode->clock != clock1)
+ newmode->clock = clock1;
+ else
+ newmode->clock = clock2;
+
+ list_add_tail(&newmode->head, &list, (caddr_t)newmode);
+ }
+
+ list_for_each_entry_safe(mode, tmp, struct drm_display_mode, &list, head) {
+ list_del(&mode->head);
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static int
+do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+{
+ struct drm_device *dev = connector->dev;
+ u8 * mode, cea_mode;
+ int modes = 0;
+
+ for (mode = db; mode < db + len; mode++) {
+ cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+ if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev,
+ &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ newmode->vrefresh = 0;
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
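+/*
+ * Each CEA data block starts with a header byte: the tag in bits 7:5
+ * and the payload length in bits 4:0, e.g. 0x42 is a Video Data Block
+ * carrying two short video descriptors.
+ */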
+static int
+cea_db_payload_len(const u8 *db)
+{
+ return db[0] & 0x1f;
+}
+
+static int
+cea_db_tag(const u8 *db)
+{
+ return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+ return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+ /* Data block offset in CEA extension block */
+ *start = 4;
+ *end = cea[2];
+ if (*end == 0)
+ *end = 127;
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ return 0;
+}
+
+#define for_each_cea_db(cea, i, start, end) \
+ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ u8 *cea = drm_find_cea_extension(edid);
+ u8 *db, dbl;
+ int modes = 0;
+
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
+ return 0;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ if (cea_db_tag(db) == VIDEO_BLOCK)
+ modes += do_cea_modes (connector, db+1, dbl);
+ }
+ }
+
+ return modes;
+}
+
+static void
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
+{
+ u8 len = cea_db_payload_len(db);
+
+ if (len >= 6) {
+ connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
+ connector->dvi_dual = db[6] & 1;
+ }
+ if (len >= 7)
+ connector->max_tmds_clock = db[7] * 5;
+ if (len >= 8) {
+ connector->latency_present[0] = db[8] >> 7;
+ connector->latency_present[1] = (db[8] >> 6) & 1;
+ }
+ if (len >= 9)
+ connector->video_latency[0] = db[9];
+ if (len >= 10)
+ connector->audio_latency[0] = db[10];
+ if (len >= 11)
+ connector->video_latency[1] = db[11];
+ if (len >= 12)
+ connector->audio_latency[1] = db[12];
+
+ DRM_LOG_KMS("HDMI: DVI dual %d, "
+ "max TMDS clock %d, "
+ "latency present %d %d, "
+ "video latency %d %d, "
+ "audio latency %d %d\n",
+ connector->dvi_dual,
+ connector->max_tmds_clock,
+ (int) connector->latency_present[0],
+ (int) connector->latency_present[1],
+ connector->video_latency[0],
+ connector->video_latency[1],
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
+}
+
+static void
+monitor_name(struct detailed_timing *t, void *data)
+{
+ if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+ *(u8 **)data = t->data.other_data.data.str.str;
+}
+
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 5)
+ return false;
+
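+ /* the HDMI IEEE OUI 0x000C03 is stored least-significant byte first */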
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IDENTIFIER;
+}
+
+/**
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+ * Some ELD fields are left to the graphics driver caller:
+ * - Conn_Type
+ * - HDCP
+ * - Port_ID
+ */
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+ uint8_t *eld = connector->eld;
+ u8 *cea;
+ u8 *name;
+ u8 *db;
+ int sad_count = 0;
+ int mnl;
+ int dbl;
+
+ (void) memset(eld, 0, sizeof(connector->eld));
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+ return;
+ }
+
+ name = NULL;
+ drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+ for (mnl = 0; name && mnl < 13; mnl++) {
+ if (name[mnl] == 0x0a)
+ break;
+ eld[20 + mnl] = name[mnl];
+ }
+ eld[4] = (cea[1] << 5) | mnl;
+ DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+ eld[0] = 2 << 3; /* ELD version: 2 */
+
+ eld[16] = edid->mfg_id[0];
+ eld[17] = edid->mfg_id[1];
+ eld[18] = edid->prod_code[0];
+ eld[19] = edid->prod_code[1];
+
+ if (cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ start = 0;
+ end = 0;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ dbl = cea_db_payload_len(db);
+
+ switch (cea_db_tag(db)) {
+ case AUDIO_BLOCK:
+ /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ if (dbl >= 1)
+ (void) memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK:
+ /* Speaker Allocation Data Block */
+ if (dbl >= 1)
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (cea_db_is_hdmi_vsdb(db))
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ eld[5] |= sad_count << 4;
+ eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+ DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+
+/**
+ * drm_edid_to_sad - extracts SADs from EDID
+ * @edid: EDID to parse
+ * @sads: pointer that will be set to the extracted SADs
+ *
+ * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
+ * Note: the returned pointer must be freed by the caller
+ *
+ * Return number of found SADs or negative number on error.
+ */
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
+{
+ int count = 0;
+ int i, start, end, dbl;
+ u8 *cea;
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+ return -ENOENT;
+ }
+
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+ return -ENOTSUP;
+ }
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+ return -EPROTO;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ u8 *db = &cea[i];
+
+ if (cea_db_tag(db) == AUDIO_BLOCK) {
+ int j;
+ dbl = cea_db_payload_len(db);
+
+ count = dbl / 3; /* SAD is 3B */
+ *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
+ if (!*sads)
+ return -ENOMEM;
+ for (j = 0; j < count; j++) {
+ u8 *sad = &db[1 + j * 3];
+
+ (*sads)[j].format = (sad[0] & 0x78) >> 3;
+ (*sads)[j].channels = sad[0] & 0x7;
+ (*sads)[j].freq = sad[1] & 0x7F;
+ (*sads)[j].byte2 = sad[2];
+ }
+ break;
+ }
+ }
+
+ return count;
+}
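+
+/*
+ * Minimal usage sketch (illustrative only; error handling elided):
+ *
+ *	struct cea_sad *sads;
+ *	int i, count;
+ *
+ *	count = drm_edid_to_sad(edid, &sads);
+ *	for (i = 0; i < count; i++)
+ *		DRM_DEBUG_KMS("SAD %d: format %d\n", i, sads[i].format);
+ *	if (count > 0)
+ *		kfree(sads, count * sizeof (*sads));
+ */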
+
+/**
+ * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ */
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ int a, v;
+
+ if (!connector->latency_present[0])
+ return 0;
+ if (!connector->latency_present[1])
+ i = 0;
+
+ a = connector->audio_latency[i];
+ v = connector->video_latency[i];
+
+ /*
+ * HDMI/DP sink doesn't support audio or video?
+ */
+ if (a == 255 || v == 255)
+ return 0;
+
+ /*
+ * Convert raw EDID values to milliseconds.
+ * Treat unknown latency as 0ms.
+ */
+ if (a)
+ a = min(2 * (a - 1), 500);
+ if (v)
+ v = min(2 * (v - 1), 500);
+
+ return max(v - a, 0);
+}
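+
+/*
+ * Worked example: a raw EDID latency value of 3 converts to
+ * (3 - 1) * 2 = 4ms, a value of 1 means 0ms, and converted values are
+ * capped at 500ms; the returned delay is the video latency minus the
+ * audio latency, clamped so it never goes below 0.
+ */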
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder whose display mode was just changed
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ */
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ /* LINTED E_FUNC_ARG_UNUSED */
+ struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder && connector->eld[0])
+ return connector;
+
+ return NULL;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ return false;
+
+ /*
+ * Because HDMI identifier is in Vendor Specific Block,
+ * search it from all data blocks of CEA extension.
+ */
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ * @edid: monitor EDID information
+ *
+ * Monitor should have a CEA extension block.
+ * If the monitor has 'basic audio' but no CEA audio blocks, it's 'basic
+ * audio' only. If there is any audio extension block and a supported
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in the EDID.
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, j;
+ bool has_audio = false;
+ int start_offset, end_offset;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ goto end;
+
+ has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+ if (has_audio) {
+ DRM_DEBUG_KMS("Monitor has basic audio support\n");
+ goto end;
+ }
+
+ if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+ goto end;
+
+ for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+ if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
+ has_audio = true;
+ for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
+ DRM_DEBUG_KMS("CEA audio format %d\n",
+ (edid_ext[i + j] >> 3) & 0xf);
+ goto end;
+ }
+ }
+end:
+ return has_audio;
+}
+
+/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ * @edid: EDID block to scan
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+ u8 *edid_ext;
+ int i, start, end;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return false;
+
+ if (cea_db_offsets(edid_ext, &start, &end))
+ return false;
+
+ for_each_cea_db(edid_ext, i, start, end) {
+ if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+ cea_db_payload_len(&edid_ext[i]) == 2) {
+ DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+ return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * drm_add_display_info - pull display info out if present
+ * @edid: EDID data
+ * @info: display info (attached to connector)
+ *
+ * Grab any available display info and stuff it into the drm_display_info
+ * structure that's part of the connector. Useful for tracking bpp and
+ * color spaces.
+ */
+static void drm_add_display_info(struct edid *edid,
+ struct drm_display_info *info)
+{
+ u8 *edid_ext;
+
+ info->width_mm = edid->width_cm * 10;
+ info->height_mm = edid->height_cm * 10;
+
+ /* driver figures it out in this case */
+ info->bpc = 0;
+ info->color_formats = 0;
+
+ if (edid->revision < 3)
+ return;
+
+ if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return;
+
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (edid_ext) {
+ info->cea_rev = edid_ext[1];
+
+ /* The existence of a CEA block should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ }
+
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+
+ switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+ case DRM_EDID_DIGITAL_DEPTH_6:
+ info->bpc = 6;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_8:
+ info->bpc = 8;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_10:
+ info->bpc = 10;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_12:
+ info->bpc = 12;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_14:
+ info->bpc = 14;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_16:
+ info->bpc = 16;
+ break;
+ case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+ default:
+ info->bpc = 0;
+ break;
+ }
+
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+}
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int num_modes = 0;
+ u32 quirks;
+
+ if (edid == NULL) {
+ return 0;
+ }
+ if (!drm_edid_is_valid(edid)) {
+ DRM_ERROR("%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ return 0;
+ }
+
+ quirks = edid_get_quirks(edid);
+
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We get this pretty much right.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
+ num_modes += add_detailed_modes(connector, edid, quirks);
+ num_modes += add_cvt_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
+ num_modes += add_alternate_cea_modes(connector, edid);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+
+ drm_add_display_info(edid, &connector->display_info);
+
+ return num_modes;
+}
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. A mode is only
+ * added when its hdisplay/vdisplay does not exceed the given limits.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay)
+{
+ int i, count, num_modes = 0;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+ if (hdisplay < 0)
+ hdisplay = 0;
+ if (vdisplay < 0)
+ vdisplay = 0;
+
+ for (i = 0; i < count; i++) {
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+ if (hdisplay && vdisplay) {
+ /*
+ * Only when two are valid, they will be used to check
+ * whether the mode should be added to the mode list of
+ * the connector.
+ */
+ if (ptr->hdisplay > hdisplay ||
+ ptr->vdisplay > vdisplay)
+ continue;
+ }
+ if (drm_mode_vrefresh(ptr) > 61)
+ continue;
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
+
diff --git a/usr/src/uts/common/io/drm/drm_fb_helper.c b/usr/src/uts/common/io/drm/drm_fb_helper.c
new file mode 100644
index 0000000..a8078bf
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_fb_helper.c
@@ -0,0 +1,958 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008, 2013, Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+
+struct list_head kernel_fb_helper_list;
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently from the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ *
+ * Initialization is done as a three-step process with drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+ * Drivers with fancier requirements than the default behaviour can override the
+ * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code from updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c the modeset
+ * code proves a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
+ */
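+
+/*
+ * Driver-side sketch of the sequence described above (illustrative only;
+ * the crtc/connector counts and the bpp value are placeholders):
+ *
+ *	ret = drm_fb_helper_init(dev, fb_helper, num_crtc, max_conn);
+ *	if (ret == 0)
+ *		ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ *	if (ret == 0)
+ *		(void) drm_fb_helper_initial_config(fb_helper, 32);
+ *	...
+ *	drm_fb_helper_fini(fb_helper);		(teardown)
+ */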
+
+/* simple single crtc case helper function
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper. This is a separate step to allow drivers to freely assign
+ * connectors to the fbdev, e.g. if some are reserved for special purposes or
+ * not adequate to be used for the fbcon.
+ *
+ * Since this is part of the initial setup before the fbdev is published, no
+ * locking is required.
+ */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+
+ list_for_each_entry(connector, struct drm_connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
+ return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i], sizeof(struct drm_fb_helper_connector));
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
+}
+
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct gfxp_bm_fb_info fb_info;
+ int i;
+
+ gfxp_bm_getfb_info(dev->vgatext->private, &fb_info);
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ struct drm_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ cmdline_mode->specified = true;
+ cmdline_mode->xres = fb_info.xres;
+ cmdline_mode->yres = fb_info.yres;
+ cmdline_mode->refresh_specified = true;
+ cmdline_mode->refresh = 60;
+ cmdline_mode->bpp_specified = true;
+ cmdline_mode->bpp = fb_info.depth;
+ }
+ return 0;
+}
+
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
+ if (helper->funcs->gamma_get == NULL)
+ return;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++)
+ helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+ uint16_t *r_base, *g_base, *b_base;
+
+ if (crtc->funcs->gamma_set == NULL)
+ return;
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + crtc->gamma_size;
+ b_base = g_base + crtc->gamma_size;
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *c;
+
+ list_for_each_entry(c, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+ return c->fb;
+ }
+
+ return NULL;
+}
+
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ bool error = false;
+ int i;
+
+ if (fb_helper == NULL)
+ return error;
+
+ dev = fb_helper->dev;
+
+ list_for_each_entry(plane, struct drm_plane, &dev->mode_config.plane_list, head)
+ drm_plane_force_disable(plane);
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ struct drm_crtc *crtc = mode_set->crtc;
+ int ret;
+
+ if (crtc->funcs->cursor_set) {
+ ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
+ if (ret)
+ error = true;
+ }
+
+ ret = drm_mode_set_config_internal(mode_set);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+
+bool drm_fb_helper_force_kernel_mode(void)
+{
+ bool ret, error = false;
+ struct drm_fb_helper *helper;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, struct drm_fb_helper, &kernel_fb_helper_list, kernel_fb_list) {
+ if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
+ ret = drm_fb_helper_restore_fbdev_mode(helper);
+ if (ret)
+ error = true;
+ }
+ return error;
+}
+
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc *crtc;
+ int bound = 0, crtcs_bound = 0;
+
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound++;
+ if (crtc->fb == fb_helper->fb)
+ bound++;
+ }
+
+ if (bound < crtcs_bound)
+ return false;
+ return true;
+}
+
+/* maximum connectors per crtcs in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+{
+ int i;
+ struct drm_device *dev = helper->dev;
+
+ kfree(helper->connector_info, dev->mode_config.num_connector * sizeof(struct drm_fb_helper_connector *));
+ for (i = 0; i < helper->crtc_count; i++) {
+ kfree(helper->crtc_info[i].mode_set.connectors, INTELFB_CONN_LIMIT * sizeof(struct drm_connector *));
+ if (helper->crtc_info[i].mode_set.mode)
+ drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ }
+ kfree(helper->crtc_info, helper->crtc_count * sizeof(struct drm_fb_helper_crtc));
+}
+
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
+{
+ struct drm_crtc *crtc;
+ int i;
+
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&kernel_fb_helper_list);
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
+ return -ENOMEM;
+
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+ if (!fb_helper->connector_info) {
+ kfree(fb_helper->crtc_info, sizeof(struct drm_fb_helper_crtc));
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
+
+ for (i = 0; i < crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.connectors =
+ kcalloc(max_conn_count,
+ sizeof(struct drm_connector *),
+ GFP_KERNEL);
+
+ if (!fb_helper->crtc_info[i].mode_set.connectors)
+ goto out_free;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
+ }
+
+ i = 0;
+ list_for_each_entry(crtc, struct drm_crtc, &dev->mode_config.crtc_list, head) {
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
+ i++;
+ }
+
+ return 0;
+out_free:
+ drm_fb_helper_crtc_free(fb_helper);
+ return -ENOMEM;
+}
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+ DRM_INFO("drm: unregistered panic notifier");
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+
+}
+
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
+{
+ int ret = 0;
+ int crtc_count = 0;
+ int i;
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ (void) memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
+
+ /* if driver picks 8 or 16 by default use that
+ for both depth/bpp */
+ if (preferred_bpp != sizes.surface_bpp)
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+
+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+ struct drm_cmdline_mode *cmdline_mode;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+
+ if (cmdline_mode->bpp_specified) {
+ switch (cmdline_mode->bpp) {
+ case 8:
+ sizes.surface_depth = sizes.surface_bpp = 8;
+ break;
+ case 15:
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
+ break;
+ case 16:
+ sizes.surface_depth = sizes.surface_bpp = 16;
+ break;
+ case 24:
+ sizes.surface_depth = sizes.surface_bpp = 24;
+ break;
+ case 32:
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ break;
+ }
+ break;
+ }
+ }
+
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
+ crtc_count++;
+ }
+ }
+
+ if (crtc_count == 0 || sizes.fb_width == (unsigned)-1 || sizes.fb_height == (unsigned)-1) {
+ /* hmm everyone went away - assume VGA cable just fell out
+ and will come back later. */
+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
+ }
+
+ /* push down into drivers */
+ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
+ * events, but at init time drm_setup_crtcs needs to be called before
+ * the fb is allocated (since we need to figure out the desired size of
+ * the fb before we can allocate it ...). Hence we need to fix things up
+ * here again.
+ */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+
+
+ /* Switch back to kernel console on panic */
+ /* multi card linked list maybe */
+ if (list_empty(&kernel_fb_helper_list)) {
+ DRM_INFO("registered panic notifier");
+ }
+
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list, (caddr_t)fb_helper);
+
+ return 0;
+}
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, struct drm_display_mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ /* LINTED E_FUNC_ARG_UNUSED */
+ int width, int height)
+{
+ struct drm_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes
+ * we have gotten so far, if not add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, struct drm_display_mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+ cmdline_mode);
+ if (mode)
+ list_add(&mode->head, &fb_helper_conn->connector->modes,
+ (caddr_t)mode);
+ return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict)
+ enable = connector->status == connector_status_connected;
+ else
+ enable = connector->status != connector_status_disconnected;
+
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line or if nothing common pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, struct drm_display_mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+ /* go for command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], struct drm_display_mode, &fb_helper_conn->connector->modes, head)
+ /* LINTED */
+ { if (1) break; }
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
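+/*
+ * Recursively try CRTC assignments for connector n and every following
+ * connector, scoring each complete assignment: a connector earns extra
+ * points for being connected, for having a command line mode and for
+ * having a preferred mode. The highest-scoring assignment is copied back
+ * into best_crtcs.
+ */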
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int num_connector = dev->mode_config.num_connector;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ ASSERT(n <= num_connector);
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kzalloc(num_connector * sizeof(struct drm_fb_helper_crtc *),
+ GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0)
+ continue;
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ (void) memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_score = score;
+ (void) memcpy(best_crtcs, crtcs,
+ num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ kfree(crtcs, (num_connector * sizeof(struct drm_fb_helper_crtc *)));
+ return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, num_connector = dev->mode_config.num_connector;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ if ((crtcs = kcalloc(num_connector,
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL)) == NULL) {
+ DRM_ERROR("Memory allocation failed for crtcs\n");
+ return;
+ }
+ if ((modes = kcalloc(num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL)) == NULL) {
+ DRM_ERROR("Memory allocation failed for modes\n");
+ goto errout1;
+ }
+ if ((enabled = kcalloc(num_connector,
+ sizeof(bool), GFP_KERNEL)) == NULL) {
+ DRM_ERROR("Memory allocation failed for enabled\n");
+ goto errout2;
+ }
+
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ if (!(fb_helper->funcs->initial_config &&
+ fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+ enabled, width, height))) {
+ (void) memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
+ (void) memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+
+ if (!drm_target_cloned(fb_helper,
+ modes, enabled, width, height) &&
+ !drm_target_preferred(fb_helper,
+ modes, enabled, width, height))
+ DRM_ERROR("Unable to find initial modes\n");
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
+ width, height);
+
+ (void) drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+ }
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ modeset->fb = NULL;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ modeset->fb = fb_helper->fb;
+ }
+ }
+
+ /* Clear out any old modes if there are no more connected outputs. */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ if (modeset->num_connectors == 0) {
+ BUG_ON(modeset->fb);
+ BUG_ON(modeset->num_connectors);
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = NULL;
+ }
+ }
+
+ kfree(enabled, num_connector * sizeof(bool));
+errout2:
+ kfree(modes, num_connector * sizeof(struct drm_display_mode *));
+errout1:
+ kfree(crtcs, num_connector * sizeof(struct drm_fb_helper_crtc *));
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * Note that this also registers the fbdev and so allows userspace to call into
+ * the driver through the fbdev interfaces.
+ *
+ * This function will call down into the ->fb_probe callback to let
+ * the driver allocate and initialize the fbdev info structure and the drm
+ * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
+ * values for the fbdev info structure.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ (void) drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0) {
+ DRM_INFO("No connectors reported connected with modes");
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+
+
+/**
+ * drm_fb_helper_hotplug_event - respond to a hotplug notification by
+ * probing all the outputs attached to the fb.
+ * @fb_helper: the drm_fb_helper
+ *
+ * Scan the connectors attached to the fb_helper and try to put together a
+ * setup after notification of a change in output configuration.
+ *
+ * Called at runtime, takes the mode config locks to be able to check/change the
+ * modeset configuration. Must be run from process context (which usually means
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+ * RETURNS:
+ * 0 on success and a non-zero error code otherwise.
+ */
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ u32 max_width, max_height, bpp_sel;
+
+ if (!fb_helper->fb)
+ return 0;
+
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ fb_helper->delayed_hotplug = true;
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ return 0;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ (void) drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+
+ drm_modeset_lock_all(dev);
+ drm_setup_crtcs(fb_helper);
+ drm_modeset_unlock_all(dev);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+
+int drm_gfxp_setmode(int mode)
+{
+ bool ret = 0;
+ if (mode == 0) {
+ ret = drm_fb_helper_force_kernel_mode();
+ if (ret == true)
+ DRM_ERROR("Failed to restore crtc configuration\n");
+ }
+ if (mode == 1)
+ DRM_DEBUG_KMS("do nothing in entervt");
+ return ret;
+}
+
+struct gfxp_blt_ops drm_gfxp_ops = {
+ NULL, /* blt */
+ NULL, /* copy */
+ NULL, /* clear */
+ drm_gfxp_setmode, /* setmode */
+};
+
+void drm_register_fbops(struct drm_device *dev)
+{
+ gfxp_bm_register_fbops(dev->vgatext->private, &drm_gfxp_ops);
+}
+
+int drm_getfb_size(struct drm_device *dev)
+{
+ struct gfxp_bm_fb_info fb_info;
+ int size, pitch;
+ gfxp_bm_getfb_info(dev->vgatext->private, &fb_info);
+ pitch = ALIGN(fb_info.xres * ((fb_info.depth + 7) / 8), 64);
+ size = ALIGN(pitch *fb_info.yres, PAGE_SIZE);
+ return size;
+}
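+
+/*
+ * Example: a 1024x768 console at 32bpp gives a pitch of
+ * ALIGN(1024 * 4, 64) = 4096 bytes and a reported size of
+ * ALIGN(4096 * 768, PAGE_SIZE) = 3145728 bytes.
+ */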
diff --git a/usr/src/uts/common/io/drm/drm_fops.c b/usr/src/uts/common/io/drm/drm_fops.c
index da61e4d..58d42f6 100644
--- a/usr/src/uts/common/io/drm/drm_fops.c
+++ b/usr/src/uts/common/io/drm/drm_fops.c
@@ -1,17 +1,22 @@
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
-/* BEGIN CSTYLED */
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
-/* drm_fops.h -- File operations for DRM -*- linux-c -*-
+/*
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- */
-/*-
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,100 +37,471 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_open_helper(struct drm_minor *minor,
+ int clone_id, int flags, cred_t *credp);
+
+static __inline__ int drm_device_is_agp(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+ return 0;
+ if (dev->driver->device_is_agp != NULL) {
+ int err = (*dev->driver->device_is_agp) (dev);
+
+ if (err != 2) {
+ return err;
+ }
+ }
+
+ return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
+}
+
+static int drm_setup(struct drm_device * dev)
+{
+ int i;
+ int ret;
+ static bool first_call = true;
+
+ if (first_call) {
+ /* OSOL_drm: moved from drm_fill_in_dev */
+ if (drm_core_has_AGP(dev)) {
+ if (drm_device_is_agp(dev))
+ dev->agp = drm_agp_init(dev);
+ if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+ && (dev->agp == NULL)) {
+ DRM_ERROR("Cannot initialize the agpgart module.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (dev->driver->firstopen) {
+ ret = dev->driver->firstopen(dev);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (first_call) {
+ /* OSOL_drm: moved from drm_get_dev */
+ /* setup the grouping for the legacy output */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+ if (ret)
+ return ret;
+ }
+ }
+
+ atomic_set(&dev->ioctl_count, 0);
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ i = drm_dma_setup(dev);
+ if (i < 0)
+ return i;
+ }
+
+ for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
+ atomic_set(&dev->counts[i], 0);
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+
+
+ DRM_DEBUG("\n");
+
+ /*
+ * The kernel's context could be created here, but is now created
+ * in drm_dma_enqueue. This is more resource-efficient for
+ * hardware that does not do DMA, but may mean that
+ * drm_select_queue fails between the time the interrupt is
+ * initialized and the time the queues are initialized.
+ */
+
+ first_call = false;
+ return 0;
+}
+
+/**
+ * Open file.
*
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Daryll Strauss <daryll@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
+ * \return zero on success or a negative number on failure.
*
+ * Searches the DRM device with the same minor number, calls open_helper(), and
+ * increments the device open count. If the open count was previously zero,
+ * i.e., this is the first time the device is opened, drm_setup() is called.
*/
+int drm_open(struct drm_minor *minor, int clone_id, int flags, cred_t *credp)
+{
+ struct drm_device *dev = minor->dev;
+ int retcode = 0;
-/* END CSTYLED */
+ DRM_DEBUG("minor->index=%d, clone_id=%d", minor->index, clone_id);
-#include "drmP.h"
+ retcode = drm_open_helper(minor, clone_id, flags, credp);
+ if (!retcode) {
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ retcode = drm_setup(dev);
+ goto out;
+ }
+ spin_unlock(&dev->count_lock);
+ }
+out:
+ return retcode;
+}
-/*ARGSUSED*/
-drm_file_t *
-drm_find_file_by_proc(drm_device_t *dev, cred_t *credp)
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * Creates and initializes a drm_file structure for the file private data and
+ * adds it into the doubly linked list in \p dev.
+ */
+static int drm_open_helper(struct drm_minor *minor,
+ int clone_id, int flags, cred_t *credp)
{
- pid_t pid = ddi_get_pid();
- drm_file_t *priv;
+ struct drm_device *dev = minor->dev;
+ struct drm_file *priv;
+ int minor_id = minor->index;
+ int ret;
+
+ if (flags & FEXCL)
+ return -EBUSY; /* No exclusive opens */
+
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
- TAILQ_FOREACH(priv, &dev->files, link)
- if (priv->pid == pid)
- return (priv);
- return (NULL);
+ DRM_DEBUG("pid = %d, minor = %d\n", ddi_get_pid(), minor_id);
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ (void) memset(priv, 0, sizeof(*priv));
+ (void) idr_replace(&minor->clone_idr, priv, clone_id); /* OSOL_drm */
+ priv->uid = crgetsuid(credp);
+ priv->pid = ddi_get_pid();
+ priv->minor = minor;
+ priv->ioctl_count = 0;
+ /* for compatibility root is always authenticated */
+ priv->authenticated = DRM_SUSER(credp);
+ priv->lock_count = 0;
+
+ INIT_LIST_HEAD(&priv->lhead);
+ INIT_LIST_HEAD(&priv->fbs);
+ mutex_init(&priv->fbs_lock, NULL, MUTEX_DRIVER, NULL);
+ INIT_LIST_HEAD(&priv->event_list);
+ DRM_INIT_WAITQUEUE(&priv->event_wait, DRM_INTR_PRI(dev));
+ priv->event_space = 4096; /* set aside 4k for event buffer */
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_open(dev, priv);
+
+ if (dev->driver->open) {
+ ret = dev->driver->open(dev, priv);
+ if (ret < 0)
+ goto out_free;
+ }
+
+
+ /* if there is no current master make this fd it */
+ mutex_lock(&dev->struct_mutex);
+ if (!priv->minor->master) {
+ /* create a new master */
+ priv->minor->master = drm_master_create(priv->minor);
+ if (!priv->minor->master) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ priv->is_master = 1;
+ /* take another reference for the copy in the local file priv */
+ priv->master = drm_master_get(priv->minor->master);
+
+ priv->authenticated = 1;
+
+ mutex_unlock(&dev->struct_mutex);
+ if (dev->driver->master_create) {
+ ret = dev->driver->master_create(dev, priv->master);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+ mutex_unlock(&dev->struct_mutex);
+ goto out_free;
+ }
+ }
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, priv, true);
+ if (ret) {
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+ mutex_unlock(&dev->struct_mutex);
+ goto out_free;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ } else {
+ /* get a reference to the master */
+ priv->master = drm_master_get(priv->minor->master);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ list_add(&priv->lhead, &dev->filelist, (caddr_t)priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+out_free:
+ kfree(priv, sizeof (*priv));
+ return ret;
}
+void drm_master_release(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct drm_master *master = fpriv->master;
+
+ if (drm_i_have_hw_lock(dev, fpriv)) {
+ DRM_DEBUG("Process %d dead, freeing lock for context %d",
+ DRM_CURRENTPID, _DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock));
+ (void) drm_lock_free(&master->lock,
+ _DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock));
+ }
+
+}
-drm_cminor_t *
-drm_find_file_by_minor(drm_device_t *dev, int minor)
+static void drm_events_release(struct drm_file *file_priv)
{
- drm_cminor_t *mp;
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e, *et;
+ struct drm_pending_vblank_event *v, *vt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
- TAILQ_FOREACH(mp, &dev->minordevs, link) {
- if (mp->minor == minor)
- return (mp);
+ /* Remove pending flips */
+ list_for_each_entry_safe(v, vt, struct drm_pending_vblank_event, &dev->vblank_event_list, base.link)
+ if (v->base.file_priv == file_priv) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base, sizeof(struct drm_pending_vblank_event));
}
- return (NULL);
+
+ /* Remove unconsumed events */
+ list_for_each_entry_safe(e, et, struct drm_pending_event, &file_priv->event_list, link)
+ e->destroy(e, sizeof(struct drm_pending_vblank_event));
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-/* drm_open_helper is called whenever a process opens /dev/drm. */
-/*ARGSUSED*/
+/**
+ * Release file.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
int
-drm_open_helper(drm_device_t *dev, drm_cminor_t *mp, int flags,
- int otyp, cred_t *credp)
+drm_release(struct drm_file *file_priv)
{
- drm_file_t *priv;
- pid_t pid;
- int retcode;
+ struct drm_device *dev = file_priv->minor->dev;
+ int retcode = 0;
- if (flags & FEXCL)
- return (EBUSY); /* No exclusive opens */
- dev->flags = flags;
- pid = ddi_get_pid();
- DRM_DEBUG("drm_open_helper :pid = %d", pid);
- DRM_LOCK();
- priv = drm_find_file_by_proc(dev, credp);
- if (priv) {
- priv->refs++;
- } else {
- priv = drm_alloc(sizeof (*priv), DRM_MEM_FILES);
- if (priv == NULL) {
- DRM_UNLOCK();
- return (ENOMEM);
- }
- bzero(priv, sizeof (*priv));
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, file_priv);
+
+ /* ========================================================
+ * Begin inline drm_release
+ */
+
+ /* Release any auth tokens that might point to this file_priv,
+ (do that under the drm_global_mutex) */
+ if (file_priv->magic)
+ (void) drm_remove_magic(file_priv->master, file_priv->magic);
+
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master)
+ drm_master_release(dev, file_priv);
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ drm_core_reclaim_buffers(dev, file_priv);
+
+ drm_events_release(file_priv);
- priv->uid = crgetsuid(credp);
- priv->pid = pid;
+ if (dev->driver->driver_features & DRIVER_MODESET)
+ drm_fb_release(file_priv);
- priv->refs = 1;
- priv->minor = 5; /* just for hack */
- priv->ioctl_count = 0;
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
- /* for compatibility root is always authenticated */
- priv->authenticated = DRM_SUSER(credp);
+ mutex_lock(&dev->ctxlist_mutex);
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
- if (dev->driver->use_gem == 1)
- drm_gem_open(priv);
+ list_for_each_entry_safe(pos, n, struct drm_ctx_list, &dev->ctxlist, head) {
+ if (pos->tag == file_priv &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev,
+ pos->handle);
- if (dev->driver->open) {
- retcode = dev->driver->open(dev, priv);
- if (retcode != 0) {
- drm_free(priv, sizeof (*priv), DRM_MEM_FILES);
- DRM_UNLOCK();
- return (retcode);
+ drm_ctxbitmap_free(dev, pos->handle);
+
+ list_del(&pos->head);
+ kfree(pos, sizeof (*pos));
+ --dev->ctx_count;
}
}
+ }
+ mutex_unlock(&dev->ctxlist_mutex);
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (file_priv->is_master) {
+ struct drm_master *master = file_priv->master;
+ struct drm_file *temp;
+ list_for_each_entry(temp, struct drm_file, &dev->filelist, lhead) {
+ if ((temp->master == file_priv->master) &&
+ (temp != file_priv))
+ temp->authenticated = 0;
+ }
+
+ /**
+ * Since the master is disappearing, so is the
+ * possibility to lock.
+ */
+
+ if (master->lock.hw_lock) {
+ master->lock.hw_lock = NULL;
+ master->lock.file_priv = NULL;
+ }
- /* first opener automatically becomes master */
- priv->master = TAILQ_EMPTY(&dev->files);
- TAILQ_INSERT_TAIL(&dev->files, priv, link);
+ if (file_priv->minor->master == file_priv->master) {
+ /* drop the reference held by the minor */
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, true);
+ drm_master_put(&file_priv->minor->master);
+ }
+ }
+
+ /* drop the reference held by the file priv */
+ drm_master_put(&file_priv->master);
+ file_priv->is_master = 0;
+ list_del(&file_priv->lhead);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, file_priv);
+ kfree(file_priv, sizeof (*file_priv));
+
+ /* ========================================================
+ * End inline drm_release
+ */
+
+ atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count)) {
+ DRM_ERROR("Device busy: %d\n",
+ atomic_read(&dev->ioctl_count));
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ return drm_lastclose(dev);
+ }
+ spin_unlock(&dev->count_lock);
+
+ return retcode;
+}
+
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+ size_t total, size_t max, struct drm_pending_event **out)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ *out = NULL;
+ if (list_empty(&file_priv->event_list))
+ goto out;
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ if (e->event->length + total > max)
+ goto out;
+
+ file_priv->event_space += e->event->length;
+ list_del(&e->link);
+ *out = e;
+ ret = true;
+
+out:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
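+/*
+ * Wait until at least one event is queued for this file, then copy out as
+ * many complete events as fit in the caller's buffer. Returns the total
+ * number of bytes copied, or a negative value if the wait was interrupted.
+ */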
+ssize_t drm_read(struct drm_file *file_priv, struct uio *uiop)
+{
+ struct drm_pending_event *e;
+ size_t total;
+ int ret = 0;
+
+ DRM_WAIT(ret, &file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ if (ret < 0) {
+ DRM_ERROR("returns %d event_list is %d", ret, list_empty(&file_priv->event_list));
+ return ret;
+ }
+
+ total = 0;
+ while (drm_dequeue_event(file_priv, total, uiop->uio_iov->iov_len, &e)) {
+ ret = uiomove((caddr_t)e->event, e->event->length, UIO_READ, uiop);
+ if (ret) {
+ DRM_ERROR("Failed to copy to user: %d", ret);
+ return (0);
+ }
+ total += e->event->length;
+ e->destroy(e, sizeof(struct drm_pending_vblank_event));
+ }
+
+ return total;
+}
+
+short drm_poll(struct drm_file *file_priv, short events)
+{
+ short revent = 0;
+
+ if (!list_empty(&file_priv->event_list)) {
+ if (events & POLLIN)
+ revent |= POLLIN;
+ if (events & POLLRDNORM)
+ revent |= POLLRDNORM;
}
- mp->fpriv = priv;
- DRM_UNLOCK();
- return (0);
+
+ return revent;
}
+
+
diff --git a/usr/src/uts/common/io/drm/drm_gem.c b/usr/src/uts/common/io/drm/drm_gem.c
index 69c5fc1..0c5651c 100644
--- a/usr/src/uts/common/io/drm/drm_gem.c
+++ b/usr/src/uts/common/io/drm/drm_gem.c
@@ -1,5 +1,9 @@
/*
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,27 +30,10 @@
*
*/
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include <vm/anon.h>
-#include <vm/seg_kmem.h>
-#include <vm/seg_kp.h>
-#include <vm/seg_map.h>
-#include <sys/fcntl.h>
-#include <sys/vnode.h>
-#include <sys/file.h>
-#include <sys/bitmap.h>
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <gfx_private.h>
#include "drmP.h"
-#include "drm.h"
+#include <vm/seg_kmem.h>
-/*
- * @file drm_gem.c
+/** @file drm_gem.c
*
* This file provides some of the base ioctls and library routines for
* the graphics memory manager implemented by each device driver.
@@ -67,241 +54,120 @@
* - Inability to allocate high fds will aggravate the X Server's select()
* handling, and likely that of many GL client applications as well.
*
- * This led to a plan of using our own integer IDs(called handles, following
+ * This led to a plan of using our own integer IDs (called handles, following
* DRM terminology) to mimic fds, and implement the fd syscalls we need as
* ioctls. The objects themselves will still include the struct file so
* that we can transition to fds if the required kernel infrastructure shows
* up at a later date, and as our interface with shmfs for memory allocation.
*/
-void
-idr_list_init(struct idr_list *head)
-{
- struct idr_list *entry;
- /* HASH for accelerate */
- entry = kmem_zalloc(DRM_GEM_OBJIDR_HASHNODE
- * sizeof (struct idr_list), KM_SLEEP);
- head->next = entry;
- for (int i = 0; i < DRM_GEM_OBJIDR_HASHNODE; i++) {
- INIT_LIST_HEAD(&entry[i]);
- }
-}
-
-int
-idr_list_get_new_above(struct idr_list *head,
- struct drm_gem_object *obj,
- int *handlep)
-{
- struct idr_list *entry;
- int key;
- entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
- key = obj->name % DRM_GEM_OBJIDR_HASHNODE;
- list_add(entry, &head->next[key], NULL);
- entry->obj = obj;
- entry->handle = obj->name;
- *handlep = obj->name;
- return (0);
-}
-
-struct drm_gem_object *
-idr_list_find(struct idr_list *head,
- uint32_t name)
-{
- struct idr_list *entry;
- int key;
- key = name % DRM_GEM_OBJIDR_HASHNODE;
-
- list_for_each(entry, &head->next[key]) {
- if (entry->handle == name)
- return (entry->obj);
- }
- return (NULL);
-}
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-int
-idr_list_remove(struct idr_list *head,
- uint32_t name)
-{
- struct idr_list *entry, *temp;
- int key;
- key = name % DRM_GEM_OBJIDR_HASHNODE;
- list_for_each_safe(entry, temp, &head->next[key]) {
- if (entry->handle == name) {
- list_del(entry);
- kmem_free(entry, sizeof (*entry));
- return (0);
- }
- }
- DRM_ERROR("Failed to remove the object %d", name);
- return (-1);
-}
+int drm_use_mem_pool = 0;
+/* memory pool is used for all platforms now */
+#define HAS_MEM_POOL(gen) ((gen > 30) && (drm_use_mem_pool))
-void
-idr_list_free(struct idr_list *head)
-{
- struct idr_list *entry, *temp;
- for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
- list_for_each_safe(entry, temp, &head->next[key]) {
- list_del(entry);
- kmem_free(entry, sizeof (*entry));
- }
- }
- kmem_free(head->next,
- DRM_GEM_OBJIDR_HASHNODE * sizeof (struct idr_list));
- head->next = NULL;
-}
+/**
+ * Initialize the GEM device fields
+ */
int
-idr_list_empty(struct idr_list *head)
+drm_gem_init(struct drm_device *dev)
{
- int empty;
- for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
- empty = list_empty(&(head)->next[key]);
- if (!empty)
- return (empty);
- }
- return (1);
-}
-
-static uint32_t shfile_name = 0;
-#define SHFILE_NAME_MAX 0xffffffff
-/*
- * will be set to 1 for 32 bit x86 systems only, in startup.c
- */
-extern int segkp_fromheap;
-extern ulong_t *segkp_bitmap;
-
-void
-drm_gem_object_reference(struct drm_gem_object *obj)
-{
- atomic_inc(&obj->refcount);
-}
+ spin_lock_init(&dev->object_name_lock);
+ idr_list_init(&dev->object_name_idr);
-void
-drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- if (obj == NULL)
- return;
+ gfxp_mempool_init();
- atomic_sub(1, &obj->refcount);
- if (obj->refcount == 0)
- drm_gem_object_free(obj);
+ return 0;
}
void
-drm_gem_object_handle_reference(struct drm_gem_object *obj)
+/* LINTED */
+drm_gem_destroy(struct drm_device *dev)
{
- drm_gem_object_reference(obj);
- atomic_inc(&obj->handlecount);
}
-void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
+static void
+drm_gem_object_free_internal(struct drm_gem_object *obj, int gen)
{
- if (obj == NULL)
- return;
-
- /*
- * Must bump handle count first as this may be the last
- * ref, in which case the object would disappear before we
- * checked for a name
- */
- atomic_sub(1, &obj->handlecount);
- if (obj->handlecount == 0)
- drm_gem_object_handle_free(obj);
- drm_gem_object_unreference(obj);
+ if (obj->pfnarray != NULL)
+ kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));
+ if (HAS_MEM_POOL(gen)) {
+ gfxp_free_mempool(&obj->mempool_cookie, obj->kaddr, obj->real_size);
+ } else {
+ (void) ddi_dma_unbind_handle(obj->dma_hdl);
+ ddi_dma_mem_free(&obj->acc_hdl);
+ ddi_dma_free_handle(&obj->dma_hdl);
+ }
+ obj->kaddr = NULL;
}
-/*
- * Initialize the GEM device fields
- */
-
-int
-drm_gem_init(struct drm_device *dev)
-{
- mutex_init(&dev->object_name_lock, NULL, MUTEX_DRIVER, NULL);
- idr_list_init(&dev->object_name_idr);
-
- atomic_set(&dev->object_count, 0);
- atomic_set(&dev->object_memory, 0);
- atomic_set(&dev->pin_count, 0);
- atomic_set(&dev->pin_memory, 0);
- atomic_set(&dev->gtt_count, 0);
- atomic_set(&dev->gtt_memory, 0);
- return (0);
-}
+static ddi_dma_attr_t old_dma_attr = {
+ DMA_ATTR_V0,
+ 0xff000U, /* dma_attr_addr_lo */
+ 0xffffffffU, /* dma_attr_addr_hi */
+ 0xffffffffU, /* dma_attr_count_max */
+ 4096, /* dma_attr_align */
+ 0x1fffU, /* dma_attr_burstsizes */
+ 1, /* dma_attr_minxfer */
+ 0xffffffffU, /* dma_attr_maxxfer */
+ 0xffffffffU, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen, variable */
+ 4, /* dma_attr_granular */
+ DDI_DMA_FLAGERR, /* dma_attr_flags */
+};
+
+static ddi_device_acc_attr_t old_acc_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_MERGING_OK_ACC,
+ DDI_FLAGERR_ACC
+};
-/*
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
+static int
+drm_gem_object_alloc_internal_normal(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size, int flag)
{
- static ddi_dma_attr_t dma_attr = {
- DMA_ATTR_V0,
- 0U, /* dma_attr_addr_lo */
- 0xffffffffU, /* dma_attr_addr_hi */
- 0xffffffffU, /* dma_attr_count_max */
- 4096, /* dma_attr_align */
- 0x1fffU, /* dma_attr_burstsizes */
- 1, /* dma_attr_minxfer */
- 0xffffffffU, /* dma_attr_maxxfer */
- 0xffffffffU, /* dma_attr_seg */
- 1, /* dma_attr_sgllen, variable */
- 4, /* dma_attr_granular */
- 0 /* dma_attr_flags */
- };
- static ddi_device_acc_attr_t acc_attr = {
- DDI_DEVICE_ATTR_V0,
- DDI_NEVERSWAP_ACC,
- DDI_MERGING_OK_ACC
- };
- struct drm_gem_object *obj;
ddi_dma_cookie_t cookie;
uint_t cookie_cnt;
- drm_local_map_t *map;
-
pgcnt_t real_pgcnt, pgcnt = btopr(size);
- uint32_t paddr, cookie_end;
+ uint64_t paddr, cookie_end;
int i, n;
+ int (*cb)(caddr_t);
+ ddi_device_acc_attr_t *acc_attr;
+ ddi_dma_attr_t* dma_attr;
+ uint_t mode_flag;
- obj = kmem_zalloc(sizeof (struct drm_gem_object), KM_NOSLEEP);
- if (obj == NULL)
- return (NULL);
+ acc_attr = &old_acc_attr;
+ dma_attr = &old_dma_attr;
+ mode_flag = IOMEM_DATA_UC_WR_COMBINE;
- obj->dev = dev;
- obj->flink = 0;
- obj->size = size;
+ cb = (flag == 0) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
+ dma_attr->dma_attr_sgllen = (int)pgcnt;
- if (shfile_name == SHFILE_NAME_MAX) {
- DRM_ERROR("No name space for object");
+ if (ddi_dma_alloc_handle(dev->devinfo, dma_attr,
+ cb, NULL, &obj->dma_hdl)) {
+ DRM_ERROR("ddi_dma_alloc_handle failed");
goto err1;
- } else {
- obj->name = ++shfile_name;
}
-
- dma_attr.dma_attr_sgllen = (int)pgcnt;
-
- if (ddi_dma_alloc_handle(dev->dip, &dma_attr,
- DDI_DMA_DONTWAIT, NULL, &obj->dma_hdl)) {
- DRM_ERROR("drm_gem_object_alloc: "
- "ddi_dma_alloc_handle failed");
- goto err1;
- }
- if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), &acc_attr,
- IOMEM_DATA_UC_WR_COMBINE, DDI_DMA_DONTWAIT, NULL,
+ if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), acc_attr,
+ mode_flag, cb, NULL,
&obj->kaddr, &obj->real_size, &obj->acc_hdl)) {
- DRM_ERROR("drm_gem_object_alloc: "
- "ddi_dma_mem_alloc failed");
+ DRM_ERROR("ddi_dma_mem_alloc failed");
goto err2;
}
if (ddi_dma_addr_bind_handle(obj->dma_hdl, NULL,
obj->kaddr, obj->real_size, DDI_DMA_RDWR,
- DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_cnt)
+ cb, NULL, &cookie, &cookie_cnt)
!= DDI_DMA_MAPPED) {
- DRM_ERROR("drm_gem_object_alloc: "
- "ddi_dma_addr_bind_handle failed");
+ DRM_ERROR("ddi_dma_addr_bind_handle failed");
goto err3;
}
@@ -309,83 +175,232 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
obj->pfnarray = kmem_zalloc(real_pgcnt * sizeof (pfn_t), KM_NOSLEEP);
if (obj->pfnarray == NULL) {
+ DRM_DEBUG("pfnarray == NULL");
goto err4;
}
+
for (n = 0, i = 1; ; i++) {
- for (paddr = cookie.dmac_address,
- cookie_end = cookie.dmac_address + cookie.dmac_size;
+ for (paddr = cookie.dmac_laddress,
+ cookie_end = cookie.dmac_laddress + cookie.dmac_size;
paddr < cookie_end;
paddr += PAGESIZE) {
obj->pfnarray[n++] = btop(paddr);
if (n >= real_pgcnt)
- goto addmap;
+ return (0);
}
if (i >= cookie_cnt)
break;
ddi_dma_nextcookie(obj->dma_hdl, &cookie);
}
-addmap:
+err4:
+ (void) ddi_dma_unbind_handle(obj->dma_hdl);
+err3:
+ ddi_dma_mem_free(&obj->acc_hdl);
+err2:
+ ddi_dma_free_handle(&obj->dma_hdl);
+err1:
+ return (-1);
+
+}
+
+/* Alloc GEM object by memory pool */
+static int
+drm_gem_object_alloc_internal_mempool(struct drm_gem_object *obj,
+ size_t size, int flag)
+{
+ int ret;
+ pgcnt_t pgcnt = btopr(size);
+
+ obj->pfnarray = kmem_zalloc(pgcnt * sizeof (pfn_t), KM_NOSLEEP);
+ if (obj->pfnarray == NULL) {
+ DRM_ERROR("Failed to allocate pfnarray ");
+ return (-1);
+ }
+
+ ret = gfxp_alloc_from_mempool(&obj->mempool_cookie, &obj->kaddr,
+ obj->pfnarray, pgcnt, flag);
+ if (ret) {
+ DRM_ERROR("Failed to alloc pages from memory pool");
+ kmem_free(obj->pfnarray, pgcnt * sizeof (pfn_t));
+ return (-1);
+ }
+
+ obj->real_size = size;
+ return (0);
+}
+
+static int
+drm_gem_object_internal(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size, int gen)
+{
+ pfn_t tmp_pfn;
+ int ret, num = 0;
+
+alloc_again:
+ if (HAS_MEM_POOL(gen)) {
+ uint32_t mode;
+ if (gen >= 60)
+ mode = GFXP_MEMORY_CACHED;
+ else
+ mode = GFXP_MEMORY_WRITECOMBINED;
+ ret = drm_gem_object_alloc_internal_mempool(obj, size, mode);
+ if (ret)
+ return (-1);
+ } else {
+ ret = drm_gem_object_alloc_internal_normal(dev, obj, size, 0);
+ if (ret)
+ return (-1);
+ }
+ tmp_pfn = hat_getpfnum(kas.a_hat, obj->kaddr);
+ if (tmp_pfn != obj->pfnarray[0]) {
+ DRM_ERROR("obj %p map incorrect 0x%lx != 0x%lx",
+ (void *)obj, tmp_pfn, obj->pfnarray[0]);
+ drm_gem_object_free_internal(obj, gen);
+ udelay(150);
+
+ if (num++ < 5)
+ goto alloc_again;
+ else
+ return (-1);
+ }
+
+ return (0);
+}
+/*
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int
+drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size, int gen)
+{
+ drm_local_map_t *map;
+ int ret;
+
+ if (size == 0) {
+ DRM_DEBUG("size == 0");
+ return (-1);
+ }
+
+ obj->dev = dev;
+ obj->size = size;
+
+ ret = drm_gem_object_internal(dev, obj, size, gen);
+ if (ret)
+ return (-1);
+
map = drm_alloc(sizeof (struct drm_local_map), DRM_MEM_MAPS);
if (map == NULL) {
+ DRM_DEBUG("map == NULL");
goto err5;
}
map->handle = obj;
map->offset = (uintptr_t)map->handle;
map->offset &= 0xffffffffUL;
- map->dev_addr = map->handle;
map->size = obj->real_size;
- map->type = _DRM_TTM;
+ map->type = _DRM_GEM;
+ map->callback = 0;
map->flags = _DRM_WRITE_COMBINING | _DRM_REMOVABLE;
- map->drm_umem_cookie =
+ map->umem_cookie =
gfxp_umem_cookie_init(obj->kaddr, obj->real_size);
- if (map->drm_umem_cookie == NULL) {
+ if (map->umem_cookie == NULL) {
+ DRM_DEBUG("umem_cookie == NULL");
goto err6;
}
- obj->map = map;
-
- atomic_set(&obj->refcount, 1);
- atomic_set(&obj->handlecount, 1);
- if (dev->driver->gem_init_object != NULL &&
- dev->driver->gem_init_object(obj) != 0) {
+ obj->maplist.map = map;
+ if (drm_map_handle(dev, &obj->maplist)) {
+ DRM_DEBUG("drm_map_handle failed");
goto err7;
}
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
- return (obj);
+ kref_init(&obj->refcount);
+ atomic_set(&obj->handle_count, 0);
+
+ if (MDB_TRACK_ENABLE) {
+ INIT_LIST_HEAD(&obj->track_list);
+ spin_lock(&dev->track_lock);
+ list_add_tail(&obj->track_list, &dev->gem_objects_list, (caddr_t)obj);
+ spin_unlock(&dev->track_lock);
+
+ INIT_LIST_HEAD(&obj->his_list);
+ drm_gem_object_track(obj, "obj init", 0, 0, NULL);
+ }
+
+ INIT_LIST_HEAD(&obj->seg_list);
+
+ return (0);
err7:
- gfxp_umem_cookie_destroy(map->drm_umem_cookie);
+ gfxp_umem_cookie_destroy(map->umem_cookie);
err6:
drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
err5:
- kmem_free(obj->pfnarray, real_pgcnt * sizeof (pfn_t));
-err4:
- (void) ddi_dma_unbind_handle(obj->dma_hdl);
-err3:
- ddi_dma_mem_free(&obj->acc_hdl);
-err2:
- ddi_dma_free_handle(&obj->dma_hdl);
-err1:
- kmem_free(obj, sizeof (struct drm_gem_object));
+ drm_gem_object_free_internal(obj, gen);
+ return (-1);
+}
- return (NULL);
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+int drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+
+ kref_init(&obj->refcount);
+ atomic_set(&obj->handle_count, 0);
+ obj->size = size;
+
+ return 0;
}
-/*
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ goto free;
+
+ if (drm_gem_object_init(dev, obj, size, 0) != 0) {
+ kmem_free(obj, sizeof (struct drm_gem_object));
+ return NULL;
+ }
+
+ if (dev->driver->gem_init_object != NULL &&
+ dev->driver->gem_init_object(obj) != 0) {
+ goto fput;
+ }
+ return obj;
+fput:
+ /* Object_init mangles the global counters - readjust them. */
+ drm_gem_object_release(obj);
+ kfree(obj, sizeof(*obj));
+free:
+ return NULL;
+}
+
+/**
* Removes the mapping from handle to filp for this object.
*/
-static int
-drm_gem_handle_delete(struct drm_file *filp, int handle)
+int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
struct drm_gem_object *obj;
- int err;
- /*
- * This is gross. The idr system doesn't let us try a delete and
+
+ /* This is gross. The idr system doesn't let us try a delete and
* return an error code. It just spews if you fail at deleting.
* So, we have to grab a lock around finding the object and then
* doing the delete on it and dropping the refcount, or the user
@@ -400,34 +415,32 @@ drm_gem_handle_delete(struct drm_file *filp, int handle)
obj = idr_list_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
- DRM_ERROR("obj %d is not in tne list, failed to close", handle);
- return (EINVAL);
+ return -EINVAL;
}
dev = obj->dev;
/* Release reference and decrement refcount. */
- err = idr_list_remove(&filp->object_idr, handle);
- if (err == -1)
- DRM_ERROR("%s", __func__);
-
+ (void) idr_list_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- spin_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- spin_unlock(&dev->struct_mutex);
- return (0);
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, filp);
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
}
-/*
+/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- int *handlep)
+ struct drm_gem_object *obj,
+ u32 *handlep)
{
+ struct drm_device *dev = obj->dev;
int ret;
/*
@@ -435,27 +448,37 @@ drm_gem_handle_create(struct drm_file *file_priv,
*/
again:
/* ensure there is space available to allocate a handle */
+ if (idr_list_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
/* do the allocation under our spinlock */
spin_lock(&file_priv->table_lock);
- ret = idr_list_get_new_above(&file_priv->object_idr, obj, handlep);
+ ret = idr_list_get_new_above(&file_priv->object_idr, (void *)obj, (int *)handlep);
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
- if (ret != 0) {
- DRM_ERROR("Failed to create handle");
- return (ret);
- }
+ if (ret != 0)
+ return ret;
drm_gem_object_handle_reference(obj);
- return (0);
+
+ if (dev->driver->gem_open_object) {
+ ret = dev->driver->gem_open_object(obj, file_priv);
+ if (ret) {
+ (void) drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+ }
+
+ return 0;
}
-/* Returns a reference to the object named by the handle. */
+/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *filp,
- int handle)
+/* LINTED */
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ u32 handle)
{
struct drm_gem_object *obj;
@@ -463,210 +486,237 @@ drm_gem_object_lookup(struct drm_file *filp,
/* Check if we currently have a reference on the object */
obj = idr_list_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- DRM_ERROR("object_lookup failed, handle %d", handle);
- return (NULL);
- }
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return NULL;
+ }
drm_gem_object_reference(obj);
spin_unlock(&filp->table_lock);
- return (obj);
+ return obj;
}
-/*
+/**
* Releases the handle to an mm object.
*/
-/*ARGSUSED*/
int
+/* LINTED */
drm_gem_close_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- struct drm_gem_close args;
+ struct drm_gem_close *args = data;
int ret;
- if (!(dev->driver->use_gem == 1))
- return (ENODEV);
-
- DRM_COPYFROM_WITH_RETURN(&args,
- (void *)data, sizeof (args));
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
- ret = drm_gem_handle_delete(fpriv, args.handle);
+ ret = drm_gem_handle_delete(file, args->handle);
- return (ret);
+ return ret;
}
-/*
+/**
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
-/*ARGSUSED*/
int
+/* LINTED */
drm_gem_flink_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- struct drm_gem_flink args;
+ struct drm_gem_flink *args = data;
struct drm_gem_object *obj;
- int ret, handle;
+ int ret;
- if (!(dev->driver->use_gem == 1))
- return (ENODEV);
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
- DRM_COPYFROM_WITH_RETURN(&args,
- (void *)data, sizeof (args));
- obj = drm_gem_object_lookup(fpriv, args.handle);
+ obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
- return (EINVAL);
- handle = args.handle;
+ return -ENOENT;
+
+again:
+ if (idr_list_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
spin_lock(&dev->object_name_lock);
- if (!obj->flink) {
- /* only creat a node in object_name_idr, no update anything */
- ret = idr_list_get_new_above(&dev->object_name_idr,
- obj, &handle);
- obj->flink = obj->name;
+ if (!obj->name) {
+ ret = idr_list_get_new_above(&dev->object_name_idr, (void *) obj,
+ &obj->name);
+ args->name = (uint64_t) obj->name;
+ spin_unlock(&dev->object_name_lock);
+
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0)
+ goto err;
+
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
+ } else {
+ args->name = (uint64_t) obj->name;
+ spin_unlock(&dev->object_name_lock);
+ ret = 0;
}
- /*
- * Leave the reference from the lookup around as the
- * name table now holds one
- */
- args.name = obj->name;
- spin_unlock(&dev->object_name_lock);
- ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
- if (ret != 0)
- DRM_ERROR(" gem flink error! %d", ret);
-
- spin_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
-
- return (ret);
+err:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
}
-/*
+/**
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
-/*ARGSUSED*/
int
+/* LINTED */
drm_gem_open_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- struct drm_gem_open args;
+ struct drm_gem_open *args = data;
struct drm_gem_object *obj;
int ret;
- int handle;
+ u32 handle;
- if (!(dev->driver->use_gem == 1)) {
- DRM_ERROR("Not support GEM");
- return (ENODEV);
- }
- DRM_COPYFROM_WITH_RETURN(&args,
- (void *) data, sizeof (args));
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
spin_lock(&dev->object_name_lock);
-
- obj = idr_list_find(&dev->object_name_idr, args.name);
-
+ obj = idr_list_find(&dev->object_name_idr, (int) args->name);
if (obj)
drm_gem_object_reference(obj);
spin_unlock(&dev->object_name_lock);
- if (!obj) {
- DRM_ERROR("Can't find the obj %d", args.name);
- return (ENOENT);
- }
+ if (!obj)
+ return -ENOENT;
- ret = drm_gem_handle_create(fpriv, obj, &handle);
- spin_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- spin_unlock(&dev->struct_mutex);
+ ret = drm_gem_handle_create(file, obj, &handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
+ return ret;
- args.handle = args.name;
- args.size = obj->size;
+ args->handle = handle;
+ args->size = obj->size;
- ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
- if (ret != 0)
- DRM_ERROR(" gem open error! %d", ret);
- return (ret);
+ return 0;
}
-/*
+/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
void
-drm_gem_open(struct drm_file *file_private)
+/* LINTED */
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
idr_list_init(&file_private->object_idr);
- mutex_init(&file_private->table_lock, NULL, MUTEX_DRIVER, NULL);
+ spin_lock_init(&file_private->table_lock);
}
-/*
+/**
* Called at device close to release the file's
* handle references on objects.
*/
-static void
-drm_gem_object_release_handle(struct drm_gem_object *obj)
+static int
+/* LINTED */
+drm_gem_object_release_handle(int id, void *ptr, void *data)
{
- drm_gem_object_handle_unreference(obj);
+ struct drm_file *file_priv = data;
+ struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
}
-/*
+/**
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
+/* LINTED E_FUNC_ARG_UNUSED */
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
struct idr_list *entry;
- spin_lock(&dev->struct_mutex);
-
- idr_list_for_each(entry, &file_private->object_idr)
- drm_gem_object_release_handle(entry->obj);
+ struct drm_gem_object *obj;
+ idr_list_for_each(entry, &file_private->object_idr) {
+ obj = (struct drm_gem_object *)entry->obj;
+ (void) drm_gem_object_release_handle(obj->name, obj, (void *)file_private);
+ }
idr_list_free(&file_private->object_idr);
- spin_unlock(&dev->struct_mutex);
+}
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_local_map *map = obj->maplist.map;
+
+ if (MDB_TRACK_ENABLE) {
+ spin_lock(&dev->track_lock);
+ list_del(&obj->track_list);
+ spin_unlock(&dev->track_lock);
+
+ struct drm_history_list *r_list, *list_temp;
+ list_for_each_entry_safe(r_list, list_temp, struct drm_history_list, &obj->his_list, head) {
+ list_del(&r_list->head);
+ drm_free(r_list, sizeof (struct drm_history_list), DRM_MEM_MAPS);
+ }
+ list_del(&obj->his_list);
+ }
+
+ (void) idr_remove(&dev->map_idr, obj->maplist.user_token >> PAGE_SHIFT);
+ gfxp_umem_cookie_destroy(map->umem_cookie);
+ drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
+
+ kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));
+
+ if (obj->dma_hdl == NULL) {
+ gfxp_free_mempool(&obj->mempool_cookie, obj->kaddr, obj->real_size);
+ } else {
+ (void) ddi_dma_unbind_handle(obj->dma_hdl);
+ ddi_dma_mem_free(&obj->acc_hdl);
+ ddi_dma_free_handle(&obj->dma_hdl);
+ }
+ obj->kaddr = NULL;
}
-/*
+/**
* Called after the last reference to the object has been lost.
*
* Frees the object
*/
void
-drm_gem_object_free(struct drm_gem_object *obj)
+drm_gem_object_free(struct kref *kref)
{
+ /* LINTED */
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
- struct drm_local_map *map = obj->map;
+
+// BUG_ON(!mutex_is_locked(&dev->struct_mutex));
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
+}
- gfxp_umem_cookie_destroy(map->drm_umem_cookie);
- drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
-
- kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));
-
- (void) ddi_dma_unbind_handle(obj->dma_hdl);
- ddi_dma_mem_free(&obj->acc_hdl);
- ddi_dma_free_handle(&obj->dma_hdl);
-
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
- kmem_free(obj, sizeof (struct drm_gem_object));
+/* LINTED E_FUNC_ARG_UNUSED */
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+ BUG_ON(1);
}
-/*
+/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
@@ -676,23 +726,78 @@ drm_gem_object_free(struct drm_gem_object *obj)
void
drm_gem_object_handle_free(struct drm_gem_object *obj)
{
- int err;
struct drm_device *dev = obj->dev;
+
/* Remove any name for this object */
spin_lock(&dev->object_name_lock);
- if (obj->flink) {
- err = idr_list_remove(&dev->object_name_idr, obj->name);
- if (err == -1)
- DRM_ERROR("%s", __func__);
- obj->flink = 0;
+ if (obj->name) {
+ (void) idr_list_remove(&dev->object_name_idr, obj->name);
+ obj->name = 0;
spin_unlock(&dev->object_name_lock);
/*
* The object name held a reference to this object, drop
* that now.
+ *
+ * This cannot be the last reference, since the handle holds one too.
*/
- drm_gem_object_unreference(obj);
+ kref_put(&obj->refcount, drm_gem_object_ref_bug);
} else
-
spin_unlock(&dev->object_name_lock);
}
+
+int
+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ obj->gtt_map_kaddr = gfxp_alloc_kernel_space(obj->real_size);
+ if (obj->gtt_map_kaddr == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void
+drm_gem_mmap(struct drm_gem_object *obj, pfn_t pfn)
+{
+ gfxp_load_kernel_space(pfn, obj->real_size, GFXP_MEMORY_WRITECOMBINED, obj->gtt_map_kaddr);
+}
+
+void
+drm_gem_release_mmap(struct drm_gem_object *obj)
+{
+ gfxp_unload_kernel_space(obj->gtt_map_kaddr, obj->real_size);
+}
+
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+ struct ddi_umem_cookie *umem_cookie = obj->maplist.map->umem_cookie;
+ umem_cookie->cvaddr = obj->kaddr;
+
+ if (obj->maplist.map->gtt_mmap == 0) {
+ gfxp_free_kernel_space(obj->gtt_map_kaddr, obj->real_size);
+ DRM_DEBUG("already freed, don't free more than once!");
+ }
+
+ if (obj->maplist.map->gtt_mmap == 1) {
+ gfxp_unmap_kernel_space(obj->gtt_map_kaddr, obj->real_size);
+ obj->maplist.map->gtt_mmap = 0;
+ }
+
+ obj->gtt_map_kaddr = NULL;
+}
+
+void
+drm_gem_object_track(struct drm_gem_object *obj, const char *name,
+ uint32_t cur_seq, uint32_t last_seq, void* ptr)
+{
+ struct drm_history_list *list;
+ list = drm_alloc(sizeof (struct drm_history_list), DRM_MEM_MAPS);
+ if (list != NULL) {
+ (void) memcpy(list->info, name, (strlen(name) * sizeof(char)));
+ list->cur_seq = cur_seq;
+ list->last_seq = last_seq;
+ list->ring_ptr = ptr;
+ list_add_tail(&list->head, &obj->his_list, (caddr_t)list);
+ }
+}
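drm_gem_flink_ioctl() and drm_gem_open_ioctl() above implement buffer sharing by global name: flink allocates a name in object_name_idr (the name table takes its own reference), and open translates that name back into a per-file handle via drm_gem_handle_create(). The sketch below shows how a userspace client might drive the two ioctls; it assumes the shared drm.h ioctl definitions (DRM_IOCTL_GEM_FLINK, DRM_IOCTL_GEM_OPEN and their argument structs) and two file descriptors already open on the same device, so treat it as an illustration rather than code from this tree.

#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "drm.h"	/* struct drm_gem_flink, struct drm_gem_open, DRM_IOCTL_* */

/*
 * Export a GEM handle from one DRM file descriptor as a global name and
 * import it on another, mirroring the flink/open ioctl pair above.
 */
static int
share_gem_object(int exporter_fd, int importer_fd, uint32_t handle)
{
	struct drm_gem_flink flink;
	struct drm_gem_open oa;

	(void) memset(&flink, 0, sizeof (flink));
	flink.handle = handle;

	/* drm_gem_flink_ioctl(): allocate a global name for the object. */
	if (ioctl(exporter_fd, DRM_IOCTL_GEM_FLINK, &flink) != 0)
		return (-1);

	(void) memset(&oa, 0, sizeof (oa));
	oa.name = flink.name;

	/* drm_gem_open_ioctl(): look the name up, create a new handle. */
	if (ioctl(importer_fd, DRM_IOCTL_GEM_OPEN, &oa) != 0)
		return (-1);

	(void) printf("name %llu -> handle %llu, size %llu\n",
	    (unsigned long long)flink.name, (unsigned long long)oa.handle,
	    (unsigned long long)oa.size);
	return (0);
}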
diff --git a/usr/src/uts/common/io/drm/drm_io32.c b/usr/src/uts/common/io/drm/drm_io32.c
new file mode 100644
index 0000000..60b04c5
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_io32.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drm.h"
+#include "drmP.h"
+#include "drm_io32.h"
+
+#ifdef _MULTI_DATAMODEL
+
+int
+copyin32_drm_map(void *dest, void *src)
+{
+ struct drm_map *dest64 = dest;
+ struct drm_map_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->offset = dest32.offset;
+ dest64->size = dest32.size;
+ dest64->type = dest32.type;
+ dest64->flags = dest32.flags;
+ dest64->handle = dest32.handle;
+ dest64->mtrr = dest32.mtrr;
+
+ return (0);
+}
+
+int
+copyout32_drm_map(void *dest, void *src)
+{
+ struct drm_map *src64 = src;
+ struct drm_map_32 src32;
+
+ src32.offset = src64->offset;
+ src32.size = (uint32_t)src64->size;
+ src32.type = src64->type;
+ src32.flags = src64->flags;
+ src32.handle = src64->handle;
+ src32.mtrr = src64->mtrr;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_buf_desc(void *dest, void *src)
+{
+ struct drm_buf_desc *dest64 = dest;
+ struct drm_buf_desc_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->count = dest32.count;
+ dest64->size = dest32.size;
+ dest64->low_mark = dest32.low_mark;
+ dest64->high_mark = dest32.high_mark;
+ dest64->flags = dest32.flags;
+ dest64->agp_start = dest32.agp_start;
+
+ return (0);
+}
+
+int
+copyout32_drm_buf_desc(void *dest, void *src)
+{
+ struct drm_buf_desc *src64 = src;
+ struct drm_buf_desc_32 src32;
+
+ src32.count = src64->count;
+ src32.size = (uint32_t)src64->size;
+ src32.low_mark = src64->low_mark;
+ src32.high_mark = src64->high_mark;
+ src32.flags = src64->flags;
+ src32.agp_start = (uint32_t)src64->agp_start;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_buf_free(void *dest, void *src)
+{
+ struct drm_buf_free *dest64 = dest;
+ struct drm_buf_free_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->count = dest32.count;
+ dest64->list = (int *)(uintptr_t)dest32.list;
+
+ return (0);
+}
+
+int
+copyin32_drm_buf_map(void *dest, void *src)
+{
+ struct drm_buf_map *dest64 = dest;
+ struct drm_buf_map_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->count = dest32.count;
+ dest64->virtual = (void *)(uintptr_t)dest32.virtual;
+ dest64->list = (drm_buf_pub_t *)(uintptr_t)dest32.list;
+ dest64->fd = dest32.fd;
+
+ return (0);
+}
+
+int
+copyout32_drm_buf_map(void *dest, void *src)
+{
+ struct drm_buf_map *src64 = src;
+ struct drm_buf_map_32 src32;
+
+ src32.count = src64->count;
+ src32.virtual = (caddr32_t)(uintptr_t)src64->virtual;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_ctx_priv_map(void *dest, void *src)
+{
+ struct drm_ctx_priv_map *dest64 = dest;
+ struct drm_ctx_priv_map_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->ctx_id = dest32.ctx_id;
+ dest64->handle = (void *)(uintptr_t)dest32.handle;
+
+ return (0);
+}
+
+int
+copyout32_drm_ctx_priv_map(void *dest, void *src)
+{
+ struct drm_ctx_priv_map *src64 = src;
+ struct drm_ctx_priv_map_32 src32;
+
+ src32.ctx_id = src64->ctx_id;
+ src32.handle = (caddr32_t)(uintptr_t)src64->handle;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_ctx_res(void *dest, void *src)
+{
+ struct drm_ctx_res *dest64 = dest;
+ struct drm_ctx_res_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->count = dest32.count;
+ dest64->contexts = (struct drm_ctx *)(uintptr_t)dest32.contexts;
+
+ return (0);
+}
+
+int
+copyout32_drm_ctx_res(void *dest, void *src)
+{
+ struct drm_ctx_res *src64 = src;
+ struct drm_ctx_res_32 src32;
+
+ src32.count = src64->count;
+ src32.contexts = (caddr32_t)(uintptr_t)src64->contexts;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_unique(void *dest, void *src)
+{
+ struct drm_unique *dest64 = dest;
+ struct drm_unique_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->unique_len = dest32.unique_len;
+ dest64->unique = (char __user *)(uintptr_t)dest32.unique;
+
+ return (0);
+}
+
+int
+copyout32_drm_unique(void *dest, void *src)
+{
+ struct drm_unique *src64 = src;
+ struct drm_unique_32 src32;
+
+ src32.unique_len = src64->unique_len;
+ src32.unique = (caddr32_t)(uintptr_t)src64->unique;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_client(void *dest, void *src)
+{
+ struct drm_client *dest64 = dest;
+ struct drm_client_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->idx = dest32.idx;
+ dest64->auth = dest32.auth;
+ dest64->pid = dest32.pid;
+ dest64->uid = dest32.uid;
+ dest64->magic = dest32.magic;
+ dest64->iocs = dest32.iocs;
+
+ return (0);
+}
+
+int
+copyout32_drm_client(void *dest, void *src)
+{
+ struct drm_client *src64 = src;
+ struct drm_client_32 src32;
+
+ src32.idx = src64->idx;
+ src32.auth = src64->auth;
+ src32.pid = (uint32_t)src64->pid;
+ src32.uid = (uint32_t)src64->uid;
+ src32.magic = (uint32_t)src64->magic;
+ src32.iocs = (uint32_t)src64->iocs;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyout32_drm_stats(void *dest, void *src)
+{
+ struct drm_stats *src64 = src;
+ struct drm_stats_32 src32;
+ int i;
+
+ src32.count = (uint32_t)src64->count;
+ for (i = 0; i < 15; i++) {
+ src32.data[i].value = src64->data[i].value;
+ src32.data[i].type = src64->data[i].type;
+ }
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_version(void *dest, void *src)
+{
+ struct drm_version *dest64 = dest;
+ struct drm_version_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->name_len = dest32.name_len;
+ dest64->name = (char *)(uintptr_t)dest32.name;
+ dest64->date_len = dest32.date_len;
+ dest64->date = (char *)(uintptr_t)dest32.date;
+ dest64->desc_len = dest32.desc_len;
+ dest64->desc = (char *)(uintptr_t)dest32.desc;
+
+ return (0);
+}
+
+int
+copyout32_drm_version(void *dest, void *src)
+{
+ struct drm_version *src64 = src;
+ struct drm_version_32 src32;
+
+ src32.version_major = src64->version_major;
+ src32.version_minor = src64->version_minor;
+ src32.version_patchlevel = src64->version_patchlevel;
+ src32.name_len = (uint32_t)src64->name_len;
+ src32.name = (caddr32_t)(uintptr_t)src64->name;
+ src32.date_len = (uint32_t)src64->date_len;
+ src32.date = (caddr32_t)(uintptr_t)src64->date;
+ src32.desc_len = (uint32_t)src64->desc_len;
+ src32.desc = (caddr32_t)(uintptr_t)src64->desc;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_wait_vblank(void *dest, void *src)
+{
+ union drm_wait_vblank *dest64 = dest;
+ union drm_wait_vblank_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->request.type = dest32.request.type;
+ dest64->request.sequence = dest32.request.sequence;
+ dest64->request.signal = dest32.request.signal;
+
+ return (0);
+}
+
+int
+copyout32_drm_wait_vblank(void *dest, void *src)
+{
+ union drm_wait_vblank *src64 = src;
+ union drm_wait_vblank_32 src32;
+
+ src32.reply.type = src64->reply.type;
+ src32.reply.sequence = src64->reply.sequence;
+ src32.reply.tval_sec = (int32_t)src64->reply.tval_sec;
+ src32.reply.tval_usec = (int32_t)src64->reply.tval_usec;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+int
+copyin32_drm_scatter_gather(void *dest, void *src)
+{
+ struct drm_scatter_gather *dest64 = dest;
+ struct drm_scatter_gather_32 dest32;
+
+ DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof(dest32));
+
+ dest64->size = dest32.size;
+ dest64->handle = dest32.handle;
+
+ return (0);
+}
+
+int
+copyout32_drm_scatter_gather(void *dest, void *src)
+{
+ struct drm_scatter_gather *src64 = src;
+ struct drm_scatter_gather_32 src32;
+
+ src32.size = (uint32_t)src64->size;
+ src32.handle = (uint32_t)src64->handle;
+
+ DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof(src32));
+
+ return (0);
+}
+
+#endif
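Every helper in this new file follows the same shape: copy the ILP32 layout in with DRM_COPYFROM_WITH_RETURN, widen pointer and size fields through uintptr_t, or narrow them back to caddr32_t/uint32_t and copy out with DRM_COPYTO_WITH_RETURN. A hypothetical additional pair is sketched below to make the pattern explicit; struct drm_example and struct drm_example_32 are invented for illustration only and do not exist in this tree, and the sketch assumes the same includes ("drm.h", "drmP.h", "drm_io32.h") as drm_io32.c.

#ifdef _MULTI_DATAMODEL

/* Invented 64-bit and ILP32 layouts used only to illustrate the pattern. */
struct drm_example {
	unsigned long size;
	void *ptr;
};

struct drm_example_32 {
	uint32_t size;
	caddr32_t ptr;
};

int
copyin32_drm_example(void *dest, void *src)
{
	struct drm_example *dest64 = dest;
	struct drm_example_32 dest32;

	DRM_COPYFROM_WITH_RETURN(&dest32, (void *)src, sizeof (dest32));

	/* Widen the 32-bit user pointer and size into the native layout. */
	dest64->size = dest32.size;
	dest64->ptr = (void *)(uintptr_t)dest32.ptr;

	return (0);
}

int
copyout32_drm_example(void *dest, void *src)
{
	struct drm_example *src64 = src;
	struct drm_example_32 src32;

	/* Narrow the native fields back into the ILP32 layout. */
	src32.size = (uint32_t)src64->size;
	src32.ptr = (caddr32_t)(uintptr_t)src64->ptr;

	DRM_COPYTO_WITH_RETURN((void *)dest, &src32, sizeof (src32));

	return (0);
}

#endif	/* _MULTI_DATAMODEL */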
diff --git a/usr/src/uts/common/io/drm/drm_ioctl.c b/usr/src/uts/common/io/drm/drm_ioctl.c
index 8d504a1..7efdf2b 100644
--- a/usr/src/uts/common/io/drm/drm_ioctl.c
+++ b/usr/src/uts/common/io/drm/drm_ioctl.c
@@ -1,8 +1,22 @@
/*
- * drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
- * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
*/
+
/*
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -25,400 +39,293 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- *
- */
-
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
+#include "drm_core.h"
#include "drm_io32.h"
-/*
- * Beginning in revision 1.1 of the DRM interface, getunique will return
- * a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
- * before setunique has been called. The format for the bus-specific part of
- * the unique is not defined for any other bus.
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
*/
-/*ARGSUSED*/
-int
-drm_getunique(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_getunique(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_unique_t u1;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_unique_32_t u32;
-
- DRM_COPYFROM_WITH_RETURN(&u32, (void *)data, sizeof (u32));
- u1.unique_len = u32.unique_len;
- u1.unique = (char __user *)(uintptr_t)u32.unique;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&u1, (void *)data, sizeof (u1));
-
- if (u1.unique_len >= dev->unique_len) {
- if (dev->unique_len == 0) {
- DRM_ERROR("drm_getunique: dev->unique_len = 0");
- return (EFAULT);
- }
- if (DRM_COPY_TO_USER(u1.unique, dev->unique, dev->unique_len))
- return (EFAULT);
- }
- u1.unique_len = dev->unique_len;
+ struct drm_unique *u = data;
+ struct drm_master *master = file->master;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_unique_32_t u32;
+ if (master->unique_len == 0 || master->unique == NULL) {
+ return -EFAULT;
+ }
- u32.unique_len = (uint32_t)u1.unique_len;
- u32.unique = (caddr32_t)(uintptr_t)u1.unique;
- DRM_COPYTO_WITH_RETURN((void *)data, &u32, sizeof (u32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &u1, sizeof (u1));
+ if (u->unique_len >= master->unique_len) {
+ if (u->unique == NULL) {
+ return -EINVAL;
+ }
+ if (DRM_COPY_TO_USER(u->unique, master->unique, master->unique_len))
+ return -EFAULT;
+ }
+ u->unique_len = master->unique_len;
- return (0);
+ return 0;
}
/*
* Deprecated in DRM version 1.1, and will return EBUSY when setversion has
* requested version 1.1 or greater.
*/
-/*ARGSUSED*/
-int
-drm_setunique(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_setunique(DRM_IOCTL_ARGS)
{
- return (EINVAL);
+ return -EINVAL;
}
-
-static int
-drm_set_busid(drm_device_t *dev)
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
- DRM_LOCK();
-
- if (dev->unique != NULL) {
- DRM_UNLOCK();
- return (EBUSY);
- }
-
- dev->unique_len = 20;
- dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
- if (dev->unique == NULL) {
- DRM_UNLOCK();
- return (ENOMEM);
- }
-
- (void) snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%1x",
- dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
-
- DRM_UNLOCK();
-
- return (0);
+ struct drm_master *master = file_priv->master;
+ int len;
+
+ if (master->unique != NULL)
+ return -EBUSY;
+
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%1x",
+ dev->pdev->domain, dev->pdev->bus, dev->pdev->slot, dev->pdev->func);
+ if (len >= master->unique_len)
+ DRM_ERROR("buffer overflow");
+ else
+ master->unique_len = len;
+
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_getmap(DRM_IOCTL_ARGS)
+/**
+ * Get mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace
+ */
+/* LINTED */
+int drm_getmap(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_map_t map;
- drm_local_map_t *mapinlist;
- int idx;
- int i = 0;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t map32;
-
- DRM_COPYFROM_WITH_RETURN(&map32, (void *)data, sizeof (map32));
- map.offset = map32.offset;
- map.size = map32.size;
- map.type = map32.type;
- map.flags = map32.flags;
- map.handle = map32.handle;
- map.mtrr = map32.mtrr;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&map, (void *)data, sizeof (map));
-
- idx = (int)map.offset;
-
- DRM_LOCK();
- if (idx < 0) {
- DRM_UNLOCK();
- return (EINVAL);
- }
-
- TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
+ struct drm_map *map = data;
+ struct drm_map_list *r_list = NULL;
+ struct list_head *list;
+ int idx;
+ int i;
+
+ idx = (int)map->offset;
+ if (idx < 0)
+ return -EINVAL;
+
+ i = 0;
+ mutex_lock(&dev->struct_mutex);
+ list_for_each(list, &dev->maplist) {
if (i == idx) {
- map.offset = mapinlist->offset;
- map.size = mapinlist->size;
- map.type = mapinlist->type;
- map.flags = mapinlist->flags;
- map.handle = (unsigned long long)(uintptr_t)
- mapinlist->handle;
- map.mtrr = mapinlist->mtrr;
+ r_list = list_entry(list, struct drm_map_list, head);
break;
}
i++;
}
+ if (!r_list || !r_list->map) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
- DRM_UNLOCK();
-
- if (mapinlist == NULL)
- return (EINVAL);
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_map_32_t map32;
-
- map32.offset = map.offset;
- map32.size = (uint32_t)map.size;
- map32.type = map.type;
- map32.flags = map.flags;
- map32.handle = (uintptr_t)map.handle;
- map32.mtrr = map.mtrr;
- DRM_COPYTO_WITH_RETURN((void *)data, &map32, sizeof (map32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &map, sizeof (map));
+ map->offset = r_list->map->offset;
+ map->size = r_list->map->size;
+ map->type = r_list->map->type;
+ map->flags = r_list->map->flags;
+ map->handle = (unsigned long long)(uintptr_t)r_list->user_token;
+ map->mtrr = r_list->map->mtrr;
+ mutex_unlock(&dev->struct_mutex);
- return (0);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_getclient(DRM_IOCTL_ARGS)
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace
+ */
+/* LINTED */
+int drm_getclient(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_client_t client;
- drm_file_t *pt;
- int idx;
- int i = 0;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_client_32_t client32;
-
- DRM_COPYFROM_WITH_RETURN(&client32, (void *)data,
- sizeof (client32));
- client.idx = client32.idx;
- client.auth = client32.auth;
- client.pid = client32.pid;
- client.uid = client32.uid;
- client.magic = client32.magic;
- client.iocs = client32.iocs;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&client, (void *)data,
- sizeof (client));
-
- idx = client.idx;
- DRM_LOCK();
- TAILQ_FOREACH(pt, &dev->files, link) {
- if (i == idx) {
- client.auth = pt->authenticated;
- client.pid = pt->pid;
- client.uid = pt->uid;
- client.magic = pt->magic;
- client.iocs = pt->ioctl_count;
- DRM_UNLOCK();
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) ==
- DDI_MODEL_ILP32) {
- drm_client_32_t client32;
-
- client32.idx = client.idx;
- client32.auth = client.auth;
- client32.pid = (uint32_t)client.pid;
- client32.uid = (uint32_t)client.uid;
- client32.magic = (uint32_t)client.magic;
- client32.iocs = (uint32_t)client.iocs;
-
- DRM_COPYTO_WITH_RETURN((void *)data, &client32,
- sizeof (client32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data,
- &client, sizeof (client));
-
- return (0);
+ struct drm_client *client = data;
+ struct drm_file *pt;
+ int idx;
+ int i;
+
+ idx = client->idx;
+ i = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry(pt, struct drm_file, &dev->filelist, lhead){
+ if (i++ >= idx) {
+ client->auth = pt->authenticated;
+ client->pid = pt->pid;
+ client->uid = pt->uid;
+ client->magic = pt->magic;
+ client->iocs = pt->ioctl_count;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
}
- i++;
}
- DRM_UNLOCK();
- return (EINVAL);
+ mutex_unlock(&dev->struct_mutex);
+
+ return -EINVAL;
}
-/*ARGSUSED*/
-int
-drm_getstats(DRM_IOCTL_ARGS)
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_getstats(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_stats_t stats;
- int i;
-
- bzero(&stats, sizeof (stats));
+ struct drm_stats *stats = data;
+ int i;
- DRM_LOCK();
+ (void) memset(stats, 0, sizeof(*stats));
for (i = 0; i < dev->counters; i++) {
- if (dev->types[i] == _DRM_STAT_LOCK) {
- stats.data[i].value
- = (dev->lock.hw_lock
- ? dev->lock.hw_lock->lock : 0);
- } else
- stats.data[i].value = atomic_read(&dev->counts[i]);
- stats.data[i].type = dev->types[i];
+ if (dev->types[i] == _DRM_STAT_LOCK)
+ stats->data[i].value =
+ (file->master->lock.hw_lock ? file->master->lock.hw_lock->lock : 0);
+ else
+ stats->data[i].value = atomic_read(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
}
- stats.count = dev->counters;
+ stats->count = dev->counters;
- DRM_UNLOCK();
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_stats_32_t stats32;
- stats32.count = (uint32_t)stats.count;
- for (i = 0; i < 15; i++) {
- stats32.data[i].value = stats.data[i].value;
- stats32.data[i].type = stats.data[i].type;
- }
- DRM_COPYTO_WITH_RETURN((void *)data, &stats32,
- sizeof (stats32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &stats, sizeof (stats));
-
- return (0);
+ return 0;
}
-#define DRM_IF_MAJOR 1
-#define DRM_IF_MINOR 2
-
-/*ARGSUSED*/
-int
-drm_setversion(DRM_IOCTL_ARGS)
+/**
+ * Get device/driver capabilities
+ */
+/* LINTED E_FUNC_ARG_UNUSED */
+int drm_getcap(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_set_version_t sv;
- drm_set_version_t retv;
- int if_version;
-
- DRM_COPYFROM_WITH_RETURN(&sv, (void *)data, sizeof (sv));
-
- retv.drm_di_major = DRM_IF_MAJOR;
- retv.drm_di_minor = DRM_IF_MINOR;
- retv.drm_dd_major = dev->driver->driver_major;
- retv.drm_dd_minor = dev->driver->driver_minor;
-
- DRM_COPYTO_WITH_RETURN((void *)data, &retv, sizeof (sv));
+ struct drm_get_cap *req = data;
+
+ req->value = 0;
+ switch (req->capability) {
+ case DRM_CAP_DUMB_BUFFER:
+ if (dev->driver->dumb_create)
+ req->value = 1;
+ break;
+ case DRM_CAP_VBLANK_HIGH_CRTC:
+ req->value = 1;
+ break;
+ case DRM_CAP_DUMB_PREFERRED_DEPTH:
+ req->value = dev->mode_config.preferred_depth;
+ break;
+ case DRM_CAP_DUMB_PREFER_SHADOW:
+ req->value = dev->mode_config.prefer_shadow;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
- if (sv.drm_di_major != -1) {
- if (sv.drm_di_major != DRM_IF_MAJOR ||
- sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
- return (EINVAL);
- if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor);
+/**
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version
+ */
+/* LINTED */
+int drm_setversion(DRM_IOCTL_ARGS)
+{
+ struct drm_set_version *sv = data;
+ int if_version, retcode = 0;
+
+ if (sv->drm_di_major != -1) {
+ if (sv->drm_di_major != DRM_IF_MAJOR ||
+ sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+ retcode = -EINVAL;
+ goto done;
+ }
+ if_version = DRM_IF_VERSION(sv->drm_di_major,
+ sv->drm_di_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
- if (sv.drm_di_minor >= 1) {
+ if (sv->drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
*/
- (void) drm_set_busid(dev);
+ (void) drm_set_busid(dev, file);
}
}
- if (sv.drm_dd_major != -1) {
- if (sv.drm_dd_major != dev->driver->driver_major ||
- sv.drm_dd_minor < 0 ||
- sv.drm_dd_minor > dev->driver->driver_minor)
- return (EINVAL);
+ if (sv->drm_dd_major != -1) {
+ if (sv->drm_dd_major != dev->driver->major ||
+ sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+ dev->driver->minor) {
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ /* OSOL_drm: if (dev->driver->set_version)
+ dev->driver->set_version(dev, sv); */
}
- return (0);
-}
+done:
+ sv->drm_di_major = DRM_IF_MAJOR;
+ sv->drm_di_minor = DRM_IF_MINOR;
+ sv->drm_dd_major = dev->driver->major;
+ sv->drm_dd_minor = dev->driver->minor;
-/*ARGSUSED*/
-int
-drm_noop(DRM_IOCTL_ARGS)
-{
- DRM_DEBUG("drm_noop\n");
- return (0);
+ return retcode;
}
-/*ARGSUSED*/
-int
-drm_version(DRM_IOCTL_ARGS)
+/** No-op ioctl. */
+/* LINTED */
+int drm_noop(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_version_t version;
- size_t len;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_version_32_t version32;
-
- DRM_COPYFROM_WITH_RETURN(&version32,
- (void *)data, sizeof (drm_version_32_t));
- version.name_len = version32.name_len;
- version.name = (char *)(uintptr_t)version32.name;
- version.date_len = version32.date_len;
- version.date = (char *)(uintptr_t)version32.date;
- version.desc_len = version32.desc_len;
- version.desc = (char *)(uintptr_t)version32.desc;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&version, (void *)data,
- sizeof (version));
-
-#define DRM_COPY(name, value) \
- len = strlen(value); \
- if (len > name##_len) len = name##_len; \
- name##_len = strlen(value); \
- if (len && name) { \
- if (DRM_COPY_TO_USER(name, value, len)) \
- return (EFAULT); \
- }
-
- version.version_major = dev->driver->driver_major;
- version.version_minor = dev->driver->driver_minor;
- version.version_patchlevel = dev->driver->driver_patchlevel;
-
- DRM_COPY(version.name, dev->driver->driver_name);
- DRM_COPY(version.date, dev->driver->driver_date);
- DRM_COPY(version.desc, dev->driver->driver_desc);
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_version_32_t version32;
-
- version32.version_major = version.version_major;
- version32.version_minor = version.version_minor;
- version32.version_patchlevel = version.version_patchlevel;
- version32.name_len = (uint32_t)version.name_len;
- version32.name = (caddr32_t)(uintptr_t)version.name;
- version32.date_len = (uint32_t)version.date_len;
- version32.date = (caddr32_t)(uintptr_t)version.date;
- version32.desc_len = (uint32_t)version.desc_len;
- version32.desc = (caddr32_t)(uintptr_t)version.desc;
- DRM_COPYTO_WITH_RETURN((void *)data, &version32,
- sizeof (drm_version_32_t));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &version,
- sizeof (version));
-
- return (0);
+ DRM_DEBUG("\n");
+ return 0;
}
+
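drm_getcap() above answers capability queries with a 0/1 flag (or a mode-config preference value) and returns -EINVAL for anything it does not recognize. A minimal userspace probe is sketched below, assuming the shared drm.h definitions of struct drm_get_cap, DRM_IOCTL_GET_CAP and DRM_CAP_DUMB_BUFFER; the capability chosen matches the handler above, which ties DRM_CAP_DUMB_BUFFER to the presence of dev->driver->dumb_create.

#include <sys/ioctl.h>
#include <string.h>
#include <stdint.h>
#include "drm.h"	/* struct drm_get_cap, DRM_IOCTL_GET_CAP, DRM_CAP_DUMB_BUFFER */

/* Returns non-zero if the driver behind drm_fd implements dumb buffers. */
static int
supports_dumb_buffers(int drm_fd)
{
	struct drm_get_cap cap;

	(void) memset(&cap, 0, sizeof (cap));
	cap.capability = DRM_CAP_DUMB_BUFFER;

	/* drm_getcap() rejects unknown capabilities with -EINVAL. */
	if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap) != 0)
		return (0);
	return (cap.value != 0);
}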
diff --git a/usr/src/uts/common/io/drm/drm_irq.c b/usr/src/uts/common/io/drm/drm_irq.c
index 3d3640a..6a875bd 100644
--- a/usr/src/uts/common/io/drm/drm_irq.c
+++ b/usr/src/uts/common/io/drm/drm_irq.c
@@ -1,10 +1,21 @@
/*
- * drm_irq.c -- IRQ IOCTL and function support
- * Created: Fri Oct 18 2003 by anholt@FreeBSD.org
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
+
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
/*
- * Copyright 2003 Eric Anholt
- * Copyright (c) 2009, Intel Corporation.
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -21,375 +32,1185 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
- * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <anholt@FreeBSD.org>
- *
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#include "drmP.h"
#include "drm.h"
+#include "drmP.h"
#include "drm_io32.h"
-/*ARGSUSED*/
-int
-drm_irq_by_busid(DRM_IOCTL_ARGS)
+static irqreturn_t __irq_handler_wrap(DRM_IRQ_ARGS)
{
- DRM_DEVICE;
- drm_irq_busid_t irq;
+ drm_device_t *dev = (void *)arg;
+ int ret;
- DRM_COPYFROM_WITH_RETURN(&irq, (void *)data, sizeof (irq));
+ mutex_enter(&dev->irq_lock);
+ ret = dev->driver->irq_handler(arg);
+ mutex_exit(&dev->irq_lock);
- if ((irq.busnum >> 8) != dev->pci_domain ||
- (irq.busnum & 0xff) != dev->pci_bus ||
- irq.devnum != dev->pci_slot ||
- irq.funcnum != dev->pci_func)
- return (EINVAL);
+ return (ret);
+}
+
+/* LINTED */
+static irqreturn_t __irq_handler_wrap_msi(caddr_t arg1, caddr_t arg2)
+{
+ drm_device_t *dev = (void *)arg1;
+ int ret;
+
+ mutex_enter(&dev->irq_lock);
+ ret = dev->driver->irq_handler(arg1);
+ mutex_exit(&dev->irq_lock);
- irq.irq = dev->irq;
+ return (ret);
+}
- DRM_DEBUG("%d:%d:%d => IRQ %d\n",
- irq.busnum, irq.devnum, irq.funcnum, irq.irq);
+static int __install_irq_handler(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i, ret;
+
+ if (pdev->msi_handle) {
+ /* Call ddi_intr_add_handler() */
+ for (i = 0; i < pdev->msi_actual; i++) {
+ ret = ddi_intr_add_handler(pdev->msi_handle[i],
+ __irq_handler_wrap_msi, (caddr_t)dev, NULL);
+ if (ret != DDI_SUCCESS) {
+ DRM_DEBUG("ddi_intr_add_handler() failed");
+ return (ret);
+ }
+ }
- DRM_COPYTO_WITH_RETURN((void *)data, &irq, sizeof (irq));
+ if (pdev->msi_flag & DDI_INTR_FLAG_BLOCK) {
+ /* Call ddi_intr_block_enable() for MSI */
+ (void) ddi_intr_block_enable(pdev->msi_handle, pdev->msi_actual);
+ } else {
+ /* Call ddi_intr_enable() for MSI non block enable */
+ for (i = 0; i < pdev->msi_actual; i++)
+ (void) ddi_intr_enable(pdev->msi_handle[i]);
+ }
+ } else {
+ /* setup the interrupt handler */
+ if (ddi_add_intr(dev->devinfo, 0, &pdev->intr_block,
+ (ddi_idevice_cookie_t *)NULL, __irq_handler_wrap,
+ (caddr_t)dev) != DDI_SUCCESS) {
+ DRM_ERROR("ddi_add_intr failed");
+ return (DDI_FAILURE);
+ }
+ }
- return (0);
+ return (DDI_SUCCESS);
}
+static void __uninstall_irq_handler(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i;
+
+ ASSERT(dev->devinfo);
+
+ if (pdev->msi_handle) {
+ /* Disable all interrupts */
+ if (pdev->msi_flag & DDI_INTR_FLAG_BLOCK) {
+ /* Call ddi_intr_block_disable() */
+ (void) ddi_intr_block_disable(pdev->msi_handle, pdev->msi_actual);
+ } else {
+ for (i = 0; i < pdev->msi_actual; i++)
+ (void) ddi_intr_disable(pdev->msi_handle[i]);
+ }
+
+ /* Call ddi_intr_remove_handler() */
+ for (i = 0; i < pdev->msi_actual; i++){
+ (void) ddi_intr_remove_handler(pdev->msi_handle[i]);
+ }
+ } else {
+ ddi_remove_intr(dev->devinfo, 0, pdev->intr_block);
+ }
+}
-static irqreturn_t
-drm_irq_handler_wrap(DRM_IRQ_ARGS)
+int
+pci_enable_msi(struct pci_dev *pdev)
{
- drm_device_t *dev = (void *)arg;
- int ret;
+ struct drm_device *dev = pdev->dev;
+ dev_info_t *devinfo = dev->devinfo;
+ int count, avail, actual;
+ int types;
+ int i, ret;
+
+ /* Get supported interrupt types */
+ if (ddi_intr_get_supported_types(dev->devinfo, &types) != DDI_SUCCESS) {
+ DRM_DEBUG("ddi_intr_get_supported_types() failed");
+ return (DDI_FAILURE);
+ }
+ if (!(types & DDI_INTR_TYPE_MSI))
+ return (DDI_FAILURE);
- mutex_enter(&dev->irq_lock);
- ret = dev->driver->irq_handler(arg);
- mutex_exit(&dev->irq_lock);
+ /* Get number of interrupts */
+ ret = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
+ if ((ret != DDI_SUCCESS) || (count == 0)) {
+ DRM_DEBUG("ddi_intr_get_nintrs() failed, "
+ "ret: %d, count: %d", ret, count);
+ return (ret);
+ }
+
+ /* Get number of available interrupts */
+ ret = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
+ if ((ret != DDI_SUCCESS) || (avail == 0)) {
+ DRM_DEBUG("ddi_intr_get_navail() failed, "
+ "ret: %d, avail: %d", ret, avail);
+ return (ret);
+ }
+
+ if (avail < count) {
+		DRM_DEBUG("nintrs() returned %d, navail returned %d",
+ count, avail);
+ }
+
+ /* Allocate memory for MSI interrupts */
+ pdev->msi_size = count * sizeof (ddi_intr_handle_t);
+ pdev->msi_handle = kmem_alloc(pdev->msi_size, KM_SLEEP);
+
+ ret = ddi_intr_alloc(devinfo, pdev->msi_handle, DDI_INTR_TYPE_MSI, 0,
+ count, &actual, DDI_INTR_ALLOC_NORMAL);
+
+ if ((ret != DDI_SUCCESS) || (actual == 0)) {
+ DRM_DEBUG("ddi_intr_alloc() failed: %d", ret);
+ kmem_free(pdev->msi_handle, pdev->msi_size);
+ return (ret);
+ }
+ pdev->msi_actual = actual;
+
+ /*
+ * Get priority for first msi, assume remaining are all the same
+ */
+ ret = ddi_intr_get_pri(pdev->msi_handle[0], &pdev->msi_pri);
+ if (ret != DDI_SUCCESS) {
+ DRM_DEBUG("ddi_intr_get_pri() failed: %d", ret);
+		for (i = 0; i < actual; i++)
+ (void) ddi_intr_free(pdev->msi_handle[i]);
+ kmem_free(pdev->msi_handle, pdev->msi_size);
+ return (ret);
+ }
+
+ ret = ddi_intr_get_cap(pdev->msi_handle[0], &pdev->msi_flag);
+ if (ret != DDI_SUCCESS) {
+ DRM_DEBUG("ddi_intr_get_cap() failed: %d", ret);
+		for (i = 0; i < actual; i++)
+ (void) ddi_intr_free(pdev->msi_handle[i]);
+ kmem_free(pdev->msi_handle, pdev->msi_size);
+ return (ret);
+ }
return (ret);
}
+void
+pci_disable_msi(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i < pdev->msi_actual; i++)
+ (void) ddi_intr_free(pdev->msi_handle[i]);
+ kmem_free(pdev->msi_handle, pdev->msi_size);
+ pdev->msi_handle = NULL;
+}
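
For context, a minimal sketch of how a driver attach path might combine the two MSI helpers above with drm_irq_install(); the name hypothetical_attach_irq is an illustrative assumption, not code from this patch.

static int hypothetical_attach_irq(struct drm_device *dev)
{
	/*
	 * Prefer MSI when the platform offers it; __install_irq_handler()
	 * later picks MSI vs. the fixed interrupt based on pdev->msi_handle.
	 */
	if (pci_enable_msi(dev->pdev) != DDI_SUCCESS)
		DRM_DEBUG("MSI not available, using the fixed interrupt");

	return drm_irq_install(dev);
}
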
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+ (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+ ((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance is attached to.
+ */
+/* LINTED */
+int drm_irq_by_busid(DRM_IOCTL_ARGS)
+{
+ struct drm_irq_busid *p = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+ if ((p->busnum >> 8) != dev->pdev->domain ||
+ (p->busnum & 0xff) != dev->pdev->bus ||
+ p->devnum != dev->pdev->slot || p->funcnum != dev->pdev->func)
+ return -EINVAL;
+
+ p->irq = dev->pdev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
+
+ return 0;
+}
+
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+ (void) memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], -1,
+ DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank IRQs on a crtc; make sure that the last vblank count of
+ * the hardware and the corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank IRQs after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ u32 vblcount;
+ s64 diff_ns;
+ int vblrc;
+ struct timeval tvblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ dev->driver->disable_vblank(dev, crtc);
+ dev->vblank_enabled[crtc] = 0;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+ * vblank timestamp, repeat until they are consistent.
+ *
+ * FIXME: There is still a race condition here and in
+ * drm_update_vblank_count() which can cause off-by-one
+ * reinitialization of software vblank counter. If gpu
+ * vblank counter doesn't increment exactly at the leading
+ * edge of a vblank interval, then we can lose 1 count if
+ * we happen to execute between start of vblank and the
+ * delayed gpu counter increment.
+ */
+ do {
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+ if (!count)
+ vblrc = 0;
+
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* If there is at least 1 msec difference between the last stored
+ * timestamp and tvblank, then we are currently executing our
+ * disable inside a new vblank interval, the tvblank timestamp
+ * corresponds to this new vblank interval and the irq handler
+ * for this vblank didn't run yet and won't run due to our disable.
+ * Therefore we need to do the job of drm_handle_vblank() and
+ * increment the vblank counter by one to account for this vblank.
+ *
+ * Skip this step if there isn't any high precision timestamp
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+ if ((vblrc > 0) && (abs(diff_ns) > 1000000)) {
+ atomic_inc(&dev->_vblank_count[crtc]);
+ }
+
+ /* Invalidate all timestamps while vblank irq's are off. */
+ clear_vblank_timestamps(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+}
+
static void vblank_disable_fn(void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
+ unsigned long irqflags;
int i;
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
- atomic_read(&dev->vblank_enabled[i]) == 1) {
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- atomic_set(&dev->vblank_enabled[i], 0);
- DRM_DEBUG("disable vblank");
+ dev->vblank_enabled[i]) {
+ DRM_DEBUG("disabling vblank on crtc %d\n", i);
+ vblank_disable_and_save(dev, i);
}
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
-void
-drm_vblank_cleanup(struct drm_device *dev)
+void drm_vblank_cleanup(struct drm_device *dev)
{
-
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
+ del_timer(&dev->vblank_disable_timer);
+ destroy_timer(&dev->vblank_disable_timer);
+
vblank_disable_fn((void *)dev);
- drm_free(dev->vbl_queues, sizeof (wait_queue_head_t) * dev->num_crtcs,
- DRM_MEM_DRIVER);
- drm_free(dev->vbl_sigs, sizeof (struct drm_vbl_sig) * dev->num_crtcs,
- DRM_MEM_DRIVER);
- drm_free(dev->_vblank_count, sizeof (atomic_t) *
- dev->num_crtcs, DRM_MEM_DRIVER);
- drm_free(dev->vblank_refcount, sizeof (atomic_t) *
- dev->num_crtcs, DRM_MEM_DRIVER);
- drm_free(dev->vblank_enabled, sizeof (int) *
- dev->num_crtcs, DRM_MEM_DRIVER);
- drm_free(dev->last_vblank, sizeof (u32) * dev->num_crtcs,
- DRM_MEM_DRIVER);
- drm_free(dev->vblank_inmodeset, sizeof (*dev->vblank_inmodeset) *
- dev->num_crtcs, DRM_MEM_DRIVER);
+ kfree(dev->vbl_queue, sizeof (wait_queue_head_t) * dev->num_crtcs);
+ kfree(dev->_vblank_count, sizeof (atomic_t) * dev->num_crtcs);
+ kfree(dev->vblank_refcount, sizeof (atomic_t) * dev->num_crtcs);
+ kfree(dev->vblank_enabled, sizeof (int) * dev->num_crtcs);
+ kfree(dev->last_vblank, sizeof (u32) * dev->num_crtcs);
+ kfree(dev->last_vblank_wait, sizeof (u32) * dev->num_crtcs);
+ kfree(dev->vblank_inmodeset, sizeof (*dev->vblank_inmodeset) * dev->num_crtcs);
+ kfree(dev->_vblank_time, sizeof (*dev->_vblank_time) * dev->num_crtcs * DRM_VBLANKTIME_RBSIZE);
+
dev->num_crtcs = 0;
+
+ mutex_destroy(&dev->vbl_lock);
}
-int
-drm_vblank_init(struct drm_device *dev, int num_crtcs)
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
- int i, ret = ENOMEM;
-
- atomic_set(&dev->vbl_signal_pending, 0);
- dev->num_crtcs = num_crtcs;
+ int i, ret = -ENOMEM;
+ init_timer(&dev->vblank_disable_timer);
+ setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+ dev);
+ mutex_init(&dev->vbl_lock, NULL, MUTEX_DRIVER, (void *)dev->pdev->intr_block);
+ spin_lock_init(&dev->vblank_time_lock);
- dev->vbl_queues = drm_alloc(sizeof (wait_queue_head_t) * num_crtcs,
- DRM_MEM_DRIVER);
- if (!dev->vbl_queues)
- goto err;
+ dev->num_crtcs = num_crtcs;
- dev->vbl_sigs = drm_alloc(sizeof (struct drm_vbl_sig) * num_crtcs,
- DRM_MEM_DRIVER);
- if (!dev->vbl_sigs)
+ dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
+ GFP_KERNEL);
+ if (!dev->vbl_queue)
goto err;
- dev->_vblank_count = drm_alloc(sizeof (atomic_t) * num_crtcs,
- DRM_MEM_DRIVER);
+ dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
if (!dev->_vblank_count)
goto err;
- dev->vblank_refcount = drm_alloc(sizeof (atomic_t) * num_crtcs,
- DRM_MEM_DRIVER);
+ dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
+ GFP_KERNEL);
if (!dev->vblank_refcount)
goto err;
- dev->vblank_enabled = drm_alloc(num_crtcs * sizeof (int),
- DRM_MEM_DRIVER);
+ dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
if (!dev->vblank_enabled)
goto err;
- dev->last_vblank = drm_alloc(num_crtcs * sizeof (u32), DRM_MEM_DRIVER);
+ dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
if (!dev->last_vblank)
goto err;
- dev->vblank_inmodeset = drm_alloc(num_crtcs * sizeof (int),
- DRM_MEM_DRIVER);
+ dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+ if (!dev->last_vblank_wait)
+ goto err;
+
+ dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
if (!dev->vblank_inmodeset)
goto err;
+ dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+ sizeof(struct timeval), GFP_KERNEL);
+ if (!dev->_vblank_time)
+ goto err;
+
+ DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+ /* Driver specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_INFO("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_INFO("No driver support for vblank timestamp query.\n");
+
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
- DRM_INIT_WAITQUEUE(&dev->vbl_queues[i], DRM_INTR_PRI(dev));
- TAILQ_INIT(&dev->vbl_sigs[i]);
+ DRM_INIT_WAITQUEUE(&dev->vbl_queue[i], DRM_INTR_PRI(dev));
atomic_set(&dev->_vblank_count[i], 0);
atomic_set(&dev->vblank_refcount[i], 0);
}
- dev->vblank_disable_allowed = 1;
- return (0);
+ dev->vblank_disable_allowed = 0;
+
+ return 0;
err:
- DRM_ERROR("drm_vblank_init: alloc error");
drm_vblank_cleanup(dev);
- return (ret);
+ return ret;
}
-/*ARGSUSED*/
-static int
-drm_install_irq_handle(drm_device_t *dev)
+/* LINTED */
+static void drm_irq_vgaarb_nokms(void *cookie, bool state)
{
- dev_info_t *dip = dev->dip;
+ struct drm_device *dev = cookie;
- if (dip == NULL) {
- DRM_ERROR("drm_install_irq_handle: cannot get vgatext's dip");
- return (DDI_FAILURE);
- }
-
- if (ddi_intr_hilevel(dip, 0) != 0) {
- DRM_ERROR("drm_install_irq_handle: "
- "high-level interrupts are not supported");
- return (DDI_FAILURE);
+ if (dev->driver->vgaarb_irq) {
+ dev->driver->vgaarb_irq(dev, state);
+ return;
}
- if (ddi_get_iblock_cookie(dip, (uint_t)0,
- &dev->intr_block) != DDI_SUCCESS) {
- DRM_ERROR("drm_install_irq_handle: cannot get iblock cookie");
- return (DDI_FAILURE);
- }
+ if (!dev->irq_enabled)
+ return;
- /* setup the interrupt handler */
- if (ddi_add_intr(dip, 0, &dev->intr_block,
- (ddi_idevice_cookie_t *)NULL, drm_irq_handler_wrap,
- (caddr_t)dev) != DDI_SUCCESS) {
- DRM_ERROR("drm_install_irq_handle: ddi_add_intr failed");
- return (DDI_FAILURE);
+ if (state) {
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+ } else {
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
}
-
- return (DDI_SUCCESS);
}
-/*ARGSUSED*/
-int
-drm_irq_install(drm_device_t *dev)
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
+ * before and after the installation.
+ */
+int drm_irq_install(struct drm_device *dev)
{
int ret;
- if (dev->dev_private == NULL) {
- DRM_ERROR("drm_irq_install: dev_private is NULL");
- return (EINVAL);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
+
+ if (dev->pdev->irq == 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Driver must have been initialized */
+ if (!dev->dev_private) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
}
if (dev->irq_enabled) {
- DRM_ERROR("drm_irq_install: irq already enabled");
- return (EBUSY);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
}
+ dev->irq_enabled = 1;
+ mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("drm_irq_install irq=%d\n", dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
- /* before installing handler */
- ret = dev->driver->irq_preinstall(dev);
- if (ret)
- return (EINVAL);
+ /* Before installing handler */
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
- /* install handler */
- ret = drm_install_irq_handle(dev);
+ /* Install handler */
+ ret = __install_irq_handler(dev);
if (ret != DDI_SUCCESS) {
- DRM_ERROR("drm_irq_install: drm_install_irq_handle failed");
- return (ret);
+ DRM_ERROR("IRQ handler installation failed");
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return -EFAULT;
}
- /* after installing handler */
- dev->driver->irq_postinstall(dev);
+ /* After installing handler */
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
- dev->irq_enabled = 1;
dev->context_flag = 0;
-
- return (0);
+ return 0;
}
-static void
-drm_uninstall_irq_handle(drm_device_t *dev)
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
+ */
+int drm_irq_uninstall(struct drm_device * dev)
{
- ASSERT(dev->dip);
- ddi_remove_intr(dev->dip, 0, dev->intr_block);
-}
+ unsigned long irqflags;
+ int irq_enabled, i;
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return -EINVAL;
-/*ARGSUSED*/
-int
-drm_irq_uninstall(drm_device_t *dev)
-{
- int i;
- if (!dev->irq_enabled) {
- return (EINVAL);
- }
+ mutex_lock(&dev->struct_mutex);
+ irq_enabled = dev->irq_enabled;
dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
/*
* Wake up any waiters so they don't hang.
*/
- DRM_SPINLOCK(&dev->vbl_lock);
- for (i = 0; i < dev->num_crtcs; i++) {
- DRM_WAKEUP(&dev->vbl_queues[i]);
- dev->vblank_enabled[i] = 0;
+ if (dev->num_crtcs) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for (i = 0; i < dev->num_crtcs; i++) {
+ DRM_WAKEUP(&dev->vbl_queue[i]);
+ dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] =
+ dev->driver->get_vblank_counter(dev, i);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
- DRM_SPINUNLOCK(&dev->vbl_lock);
- dev->driver->irq_uninstall(dev);
- drm_uninstall_irq_handle(dev);
- dev->locked_tasklet_func = NULL;
+ if (!irq_enabled)
+ return -EINVAL;
- return (DDI_SUCCESS);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+
+ __uninstall_irq_handler(dev);
+
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_control(DRM_IOCTL_ARGS)
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+/* LINTED */
+int drm_control(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_control_t ctl;
- int err;
+ struct drm_control *ctl = data;
- DRM_COPYFROM_WITH_RETURN(&ctl, (void *)data, sizeof (ctl));
+	/*
+	 * If we don't have an IRQ, fall back for compatibility reasons;
+	 * this used to be a separate function in drm_dma.h.
+	 */
- switch (ctl.func) {
+
+ switch (ctl->func) {
case DRM_INST_HANDLER:
- /*
- * Handle drivers whose DRM used to require IRQ setup but the
- * no longer does.
- */
- return (drm_irq_install(dev));
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+ if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+ ctl->irq != dev->pdev->irq)
+ return -EINVAL;
+ return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
- err = drm_irq_uninstall(dev);
- return (err);
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+ return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+ return drm_irq_uninstall(dev);
default:
- return (EINVAL);
+ return -EINVAL;
}
}
-u32
-drm_vblank_count(struct drm_device *dev, int crtc)
+/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
{
- return (atomic_read(&dev->_vblank_count[crtc]));
+ s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ u64 dotclock;
+
+ /* Dot clock in Hz: */
+ dotclock = (u64) crtc->hwmode.clock * 1000;
+
+	/* Fields of interlaced scanout modes are only half a frame duration.
+	 * Double the dotclock to get half the frame-/line-/pixel duration.
+ */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ int frame_size;
+ /* Convert scanline length in pixels and video dot clock to
+ * line duration, frame duration and pixel duration in
+ * nanoseconds:
+ */
+ pixeldur_ns = (s64) div_u64(1000000000, dotclock);
+ linedur_ns = (s64) div_u64(((u64) crtc->hwmode.crtc_htotal *
+ 1000000000), dotclock);
+ frame_size = crtc->hwmode.crtc_htotal *
+ crtc->hwmode.crtc_vtotal;
+ framedur_ns = (s64) div_u64((u64) frame_size * 1000000000,
+ dotclock);
+ } else
+ DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+
+ crtc->pixeldur_ns = pixeldur_ns;
+ crtc->linedur_ns = linedur_ns;
+ crtc->framedur_ns = framedur_ns;
+
+ DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, crtc->hwmode.crtc_htotal,
+ crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+ (int) linedur_ns, (int) pixeldur_ns);
}
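
A worked example of the arithmetic above, assuming a 1920x1080@60 mode with a 148500 kHz dot clock, crtc_htotal of 2200 and crtc_vtotal of 1125; the numbers are illustrative, not taken from any driver in this patch.

/*
 *   dotclock    = 148500 * 1000                        = 148500000 Hz
 *   pixeldur_ns = 1000000000 / 148500000               = 6 ns
 *   linedur_ns  = 2200 * 1000000000 / 148500000        = 14814 ns
 *   framedur_ns = 2200 * 1125 * 1000000000 / 148500000 = 16666666 ns
 *                 (i.e. roughly a 60 Hz frame period)
 */
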
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as a no-op if a doublescan or interlaced video mode is
+ * active. Higher-level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ * On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL - Invalid crtc.
+ * -EAGAIN - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc)
+{
+ struct timeval stime, raw_time;
+ struct drm_display_mode *mode;
+ int vbl_status, vtotal, vdisplay;
+ int vpos, hpos, i;
+ s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ bool invbl;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return -EIO;
+ }
+
+ mode = &refcrtc->hwmode;
+ vtotal = mode->crtc_vtotal;
+ vdisplay = mode->crtc_vdisplay;
+
+ /* Durations of frames, lines, pixels in nanoseconds. */
+ framedur_ns = refcrtc->framedur_ns;
+ linedur_ns = refcrtc->linedur_ns;
+ pixeldur_ns = refcrtc->pixeldur_ns;
+
+ /* If mode timing undefined, just return as no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ return -EAGAIN;
+ }
+
+ /* Get current scanout position with system timestamp.
+ * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+ * if single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+		/* Bracket the scanout position query with two system
+		 * timestamps taken as close together as possible.
+		 */
+
+ /* Get system timestamp before query. */
+ do_gettimeofday(&stime);
+
+ /* Get vertical and horizontal scanout pos. vpos, hpos. */
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+ /* Get system timestamp after query. */
+ do_gettimeofday(&raw_time);
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+ crtc, vbl_status);
+ return -EIO;
+ }
+
+ duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= (s64) *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+ crtc, (int) duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = (int) duration_ns;
+
+ /* Check if in vblank area:
+ * vpos is >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+ invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+ * can be negative if start of scanout hasn't happened yet.
+ */
+ delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+ ((vdisplay - vpos) < vtotal / 100)) {
+ delta_ns = delta_ns - framedur_ns;
+
+ /* Signal this correction as "applied". */
+ vbl_status |= 0x8;
+ }
+
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns, vblank_time);
+
+ DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+ raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+ (int) duration_ns/1000, i);
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+ vbl_status |= DRM_VBLANKTIME_INVBL;
+
+ return vbl_status;
+}
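
A hedged sketch of the intended call pattern: a kms driver's get_vblank_timestamp hook can delegate to the helper above roughly as follows, where my_crtc_for_pipe() is an assumed driver-private lookup, not a function defined here.

static int my_get_vblank_timestamp(struct drm_device *dev, int crtc,
    int *max_error, struct timeval *vblank_time, unsigned flags)
{
	struct drm_crtc *refcrtc = my_crtc_for_pipe(dev, crtc);

	if (refcrtc == NULL)
		return -EINVAL;

	/* refcrtc->hwmode must describe the true scanout timing. */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
	    vblank_time, flags, refcrtc);
}
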
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
+{
+ int ret = 0;
+
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
+
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+ return (u32) ret;
+ }
+
+ /* GPU high precision timestamp query unsupported or failed.
+ * Return gettimeofday timestamp as best estimate.
+ */
+ do_gettimeofday(tvblank);
+
+ return 0;
+}
+
+/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+ return atomic_read(&dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Also returns the system timestamp of the vblank
+ * interval that corresponds to the current vblank counter value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+{
+ u32 cur_vblank;
+
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+ cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+ return cur_vblank;
+}
+
+/* LINTED */
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ e->event.sequence = (u32) seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list, (caddr_t)&e->base);
+ DRM_WAKEUP(&e->base.file_priv->event_wait);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct timeval now;
+ unsigned int seq;
+ if (crtc >= 0) {
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+ } else {
+ seq = 0;
+
+ do_gettimeofday(&now);
+ }
+ e->pipe = crtc;
+ send_vblank_event(dev, e, seq, &now);
+}
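
For illustration, a driver's pageflip-completion path could hand a queued event back to userspace with this helper; my_complete_pageflip() and its NULL check are assumptions for the sketch, and the event lock is taken because the comment above requires the caller to hold it.

static void my_complete_pageflip(struct drm_device *dev, int crtc,
    struct drm_pending_vblank_event *event)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (event != NULL)
		drm_send_vblank_event(dev, crtc, event);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
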
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc). Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
- u32 cur_vblank, diff;
+ u32 cur_vblank, diff, tslot, rc;
+ struct timeval t_vblank;
+
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
+ *
+ * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This prevents races between the gpu
+	 * updating its hardware counter and us retrieving the
+	 * corresponding vblank timestamp.
*/
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
diff += dev->max_vblank_count;
- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->last_vblank[crtc], cur_vblank, diff);
+
+ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+ crtc, dev->last_vblank[crtc], cur_vblank, diff);
+ }
+
+ DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+ crtc, diff);
+
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * reinitialize delayed at next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
atomic_add(diff, &dev->_vblank_count[crtc]);
}
-static timeout_id_t timer_id = NULL;
-
-int
-drm_vblank_get(struct drm_device *dev, int crtc)
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
{
+ unsigned long irqflags, irqflags2;
int ret = 0;
- DRM_SPINLOCK(&dev->vbl_lock);
-
- if (timer_id != NULL) {
- (void) untimeout(timer_id);
- timer_id = NULL;
- }
-
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- atomic_add(1, &dev->vblank_refcount[crtc]);
- if (dev->vblank_refcount[crtc] == 1 &&
- atomic_read(&dev->vblank_enabled[crtc]) == 0) {
- ret = dev->driver->enable_vblank(dev, crtc);
- if (ret)
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
+ if (!dev->vblank_enabled[crtc]) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+ crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else {
+ dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- atomic_set(&dev->vblank_enabled[crtc], 1);
- drm_update_vblank_count(dev, crtc);
+ ret = -EINVAL;
}
}
- DRM_SPINUNLOCK(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- return (ret);
+ return ret;
}
-void
-drm_vblank_put(struct drm_device *dev, int crtc)
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
{
- DRM_SPINLOCK(&dev->vbl_lock);
+	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+
/* Last user schedules interrupt disable */
- atomic_dec(&dev->vblank_refcount[crtc]);
+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+ (drm_vblank_offdelay > 0))
+ mod_timer(&dev->vblank_disable_timer,
+ ((drm_vblank_offdelay * DRM_HZ)/1000));
+}
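
A minimal usage sketch for the reference counting above, assuming caller code that is not part of this patch: hold a reference only while the counter is needed, so the delayed-disable timer can turn the interrupt off again.

static int sample_vblank_count(struct drm_device *dev, int crtc, u32 *count)
{
	int ret = drm_vblank_get(dev, crtc);	/* may enable the interrupt */

	if (ret)
		return ret;

	*count = drm_vblank_count(dev, crtc);
	drm_vblank_put(dev, crtc);		/* schedules delayed disable */
	return 0;
}
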
- if (dev->vblank_refcount[crtc] == 0)
- timer_id = timeout(vblank_disable_fn, (void *) dev, 5*DRM_HZ);
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned int seq;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ vblank_disable_and_save(dev, crtc);
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+
+ /* Send any queued vblank events, lest the natives grow disquiet */
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock(&dev->event_lock);
+ list_for_each_entry_safe(e, t, struct drm_pending_vblank_event,
+ &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+		DRM_DEBUG("Sending premature vblank event on disable: "
+		    "wanted %d, current %d\n",
+		    e->event.sequence, seq);
+ list_del(&e->base.link);
+ drm_vblank_put(dev, e->pipe);
+ send_vblank_event(dev, e, seq, &now);
+ }
+ spin_unlock(&dev->event_lock);
- DRM_SPINUNLOCK(&dev->vbl_lock);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-/*
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+ /* vblank is not initialized (IRQ not installed ?) */
+ if (!dev->num_crtcs)
+ return;
+ /*
+ * To avoid all the problems that might happen if interrupts
+ * were enabled/disabled around or between these calls, we just
+ * have the kernel take a reference on the CRTC (just once though
+ * to avoid corrupting the count if multiple, mismatch calls occur),
+ * so that interrupts remain enabled in the interim.
+ */
+ if (!dev->vblank_inmodeset[crtc]) {
+ dev->vblank_inmodeset[crtc] = 0x1;
+ if (drm_vblank_get(dev, crtc) == 0)
+ dev->vblank_inmodeset[crtc] |= 0x2;
+ }
+}
+
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
+ if (dev->vblank_inmodeset[crtc]) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->vblank_disable_allowed = 1;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ if (dev->vblank_inmodeset[crtc] & 0x2)
+ drm_vblank_put(dev, crtc);
+
+ dev->vblank_inmodeset[crtc] = 0;
+ }
+}
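
A hedged sketch of the intended bracketing for a non-KMS driver's mode switch; my_program_mode() is a placeholder for driver code that may reset the hardware frame counter.

static void my_set_mode(struct drm_device *dev, int crtc)
{
	drm_vblank_pre_modeset(dev, crtc);	/* holds a vblank reference */
	my_program_mode(dev, crtc);		/* hw frame counter may reset */
	drm_vblank_post_modeset(dev, crtc);	/* drops it, allows disable */
}
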
+
+/**
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
@@ -400,182 +1221,305 @@ drm_vblank_put(struct drm_device *dev, int crtc)
* enabled around this call, we don't have to do anything since the counter
* will have already been incremented.
*/
-/*ARGSUSED*/
-int
-drm_modeset_ctl(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_modeset_ctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- struct drm_modeset_ctl modeset;
- int crtc, ret = 0;
+ struct drm_modeset_ctl *modeset = data;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
- goto out;
+ return 0;
- DRM_COPYFROM_WITH_RETURN(&modeset, (void *)data,
- sizeof (modeset));
+ /* KMS drivers handle this internally */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
- crtc = modeset.crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
+ crtc = modeset->crtc;
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatch calls occur),
- * so that interrupts remain enabled in the interim.
- */
- switch (modeset.cmd) {
+ switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 1;
- ret = drm_vblank_get(dev, crtc);
- }
+ drm_vblank_pre_modeset(dev, crtc);
break;
case _DRM_POST_MODESET:
- if (dev->vblank_inmodeset[crtc]) {
- DRM_SPINLOCK(&dev->vbl_lock);
- dev->vblank_disable_allowed = 1;
- dev->vblank_inmodeset[crtc] = 0;
- DRM_SPINUNLOCK(&dev->vbl_lock);
- drm_vblank_put(dev, crtc);
- }
+ drm_vblank_post_modeset(dev, crtc);
break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out:
- return (ret);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_wait_vblank(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_wait_vblank_t vblwait;
- int ret, flags, crtc;
- unsigned int sequence;
-
- if (!dev->irq_enabled) {
- DRM_ERROR("wait vblank, EINVAL");
- return (EINVAL);
- }
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_wait_vblank_32_t vblwait32;
- DRM_COPYFROM_WITH_RETURN(&vblwait32, (void *)data,
- sizeof (vblwait32));
- vblwait.request.type = vblwait32.request.type;
- vblwait.request.sequence = vblwait32.request.sequence;
- vblwait.request.signal = vblwait32.request.signal;
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
+{
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+ int ret;
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+
+ e->pipe = pipe;
+ e->base.pid = ddi_get_pid();
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = vblwait->request.signal;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (void *, size_t)) kfree;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (file_priv->event_space < sizeof e->event) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ file_priv->event_space -= sizeof e->event;
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
+ if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+ vblwait->request.sequence, seq, pipe);
+
+ e->event.sequence = vblwait->request.sequence;
+ if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ drm_vblank_put(dev, pipe);
+ send_vblank_event(dev, e, seq, &now);
+ pollwakeup(&e->base.file_priv->drm_pollhead, POLLIN | POLLRDNORM);
+ vblwait->reply.sequence = seq;
} else {
-#endif
- DRM_COPYFROM_WITH_RETURN(&vblwait, (void *)data,
- sizeof (vblwait));
-#ifdef _MULTI_DATAMODEL
+ /* drm_handle_vblank_events will call drm_vblank_put */
+ list_add_tail(&e->base.link, &dev->vblank_event_list, (caddr_t)&e->base);
+ vblwait->reply.sequence = vblwait->request.sequence;
}
-#endif
- if (vblwait.request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
- DRM_ERROR("drm_wait_vblank: wrong request type 0x%x",
- vblwait.request.type);
- return (EINVAL);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return 0;
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e, sizeof(*e));
+err_put:
+ drm_vblank_put(dev, pipe);
+ return ret;
+}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank irq disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
+/* LINTED */
+int drm_wait_vblank(DRM_IOCTL_ARGS)
+{
+ union drm_wait_vblank *vblwait = data;
+ int ret = 0;
+ unsigned int flags, seq, crtc, high_crtc;
+
+ if ((!dev->pdev->irq) || (!dev->irq_enabled))
+ return -EINVAL;
+
+ if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+ return -EINVAL;
+
+ if (vblwait->request.type &
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK)) {
+ DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+ vblwait->request.type,
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
+ return -EINVAL;
}
- flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+ high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_crtc)
+ crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ else
crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
- if (crtc >= dev->num_crtcs) {
- DRM_ERROR("wait vblank operation not support");
- return (ENOTSUP);
- }
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
+
ret = drm_vblank_get(dev, crtc);
if (ret) {
- DRM_ERROR("can't get drm vblank %d", ret);
- return (ret);
+ DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+ return ret;
}
- sequence = drm_vblank_count(dev, crtc);
+ seq = drm_vblank_count(dev, crtc);
- switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
+ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
- vblwait.request.sequence += sequence;
- vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
- /*FALLTHROUGH*/
+ vblwait->request.sequence += seq;
+ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ /* LINTED */
case _DRM_VBLANK_ABSOLUTE:
break;
default:
- DRM_DEBUG("wait vblank return EINVAL");
- return (EINVAL);
+ ret = -EINVAL;
+ goto done;
}
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (sequence - vblwait.request.sequence) <= (1<<23)) {
- vblwait.request.sequence = sequence + 1;
+ if (flags & _DRM_VBLANK_EVENT) {
+ /* must hold on to the vblank ref until the event fires
+ * drm_vblank_put will be called asynchronously
+ */
+ return drm_queue_vblank_event(dev, crtc, vblwait, file);
}
- if (flags & _DRM_VBLANK_SIGNAL) {
- /*
- * Don't block process, send signal when vblank interrupt
- */
- DRM_ERROR("NOT SUPPORT YET, SHOULD BE ADDED");
- cmn_err(CE_WARN, "NOT SUPPORT YET, SHOULD BE ADDED");
- ret = EINVAL;
- goto done;
- } else {
- /* block until vblank interupt */
- /* shared code returns -errno */
- DRM_WAIT_ON(ret, &dev->vbl_queues[crtc], 3 * DRM_HZ,
- (((drm_vblank_count(dev, crtc)
- - vblwait.request.sequence) <= (1 << 23)) ||
- !dev->irq_enabled));
- if (ret != EINTR) {
- struct timeval now;
- (void) uniqtime(&now);
- vblwait.reply.tval_sec = now.tv_sec;
- vblwait.reply.tval_usec = now.tv_usec;
- vblwait.reply.sequence = drm_vblank_count(dev, crtc);
- }
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1<<23)) {
+ vblwait->request.sequence = seq + 1;
}
-done:
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_wait_vblank_32_t vblwait32;
- vblwait32.reply.type = vblwait.reply.type;
- vblwait32.reply.sequence = vblwait.reply.sequence;
- vblwait32.reply.tval_sec = (int32_t)vblwait.reply.tval_sec;
- vblwait32.reply.tval_usec = (int32_t)vblwait.reply.tval_usec;
- DRM_COPYTO_WITH_RETURN((void *)data, &vblwait32,
- sizeof (vblwait32));
+ DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+ vblwait->request.sequence, crtc);
+ dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+ DRM_WAIT_ON(ret, &dev->vbl_queue[crtc], 3 * DRM_HZ,
+ (((drm_vblank_count(dev, crtc) -
+ vblwait->request.sequence) <= (1 << 23)) ||
+ !dev->irq_enabled));
+
+ if (ret != -EINTR) {
+ struct timeval now;
+
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+
+ vblwait->reply.tval_sec = now.tv_sec;
+ vblwait->reply.tval_usec = now.tv_usec;
+ DRM_DEBUG("returning %d to client\n",
+ vblwait->reply.sequence);
} else {
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &vblwait,
- sizeof (vblwait));
-#ifdef _MULTI_DATAMODEL
+ DRM_DEBUG("vblank wait interrupted by signal\n");
}
-#endif
+done:
drm_vblank_put(dev, crtc);
- return (ret);
+ return ret;
}
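
To make the request layout concrete, a caller might fill the union like this before issuing the wait; the field names come from union drm_wait_vblank, while the helper itself is illustrative.

static void fill_next_vblank_request(union drm_wait_vblank *vbl)
{
	(void) memset(vbl, 0, sizeof (*vbl));
	vbl->request.type = _DRM_VBLANK_RELATIVE;	/* relative to now */
	vbl->request.sequence = 1;			/* the next vblank */
	vbl->request.signal = 0;			/* unused here */
}
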
-
-/*ARGSUSED*/
-void
-drm_vbl_send_signals(drm_device_t *dev)
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
- DRM_DEBUG("drm_vbl_send_signals");
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, struct drm_pending_vblank_event,
+ &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ if ((seq - e->event.sequence) > (1<<23))
+ continue;
+
+ DRM_DEBUG("vblank event on %d, current %d\n",
+ e->event.sequence, seq);
+
+ list_del(&e->base.link);
+ drm_vblank_put(dev, e->pipe);
+ send_vblank_event(dev, e, seq, &now);
+ pollwakeup(&e->base.file_priv->drm_pollhead, POLLIN | POLLRDNORM);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-void
-drm_handle_vblank(struct drm_device *dev, int crtc)
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
- atomic_inc(&dev->_vblank_count[crtc]);
- DRM_WAKEUP(&dev->vbl_queues[crtc]);
+ u32 vblcount;
+ s64 diff_ns;
+ struct timeval tvblank;
+ unsigned long irqflags;
+
+ if (!dev->num_crtcs)
+ return false;
+
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!dev->vblank_enabled[crtc]) {
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return false;
+ }
+
+ /* Fetch corresponding timestamp for this vblank interval from
+ * driver and store it in proper slot of timestamp ringbuffer.
+ */
+
+ /* Get current timestamp and count. */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ (void) drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* Update the vblank timestamp and count if there is at least
+	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds of difference
+	 * between the last stored timestamp and the current timestamp.
+	 * A smaller difference means basically identical timestamps,
+	 * which happens if this vblank has already been processed and
+	 * this is a redundant call, e.g., due to spurious vblank
+	 * interrupts. We need to ignore those for accounting.
+	 */
+ if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+ atomic_inc(&dev->_vblank_count[crtc]);
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
+
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ drm_handle_vblank_events(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return true;
}
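
Finally, a sketch of the driver side that feeds drm_handle_vblank(); MY_VBLANK_STATUS_BIT and my_read_and_clear_status() are assumed hardware-specific placeholders, not definitions from this patch.

static irqreturn_t my_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (void *)arg;
	uint32_t status = my_read_and_clear_status(dev);

	if (!(status & MY_VBLANK_STATUS_BIT))
		return (DDI_INTR_UNCLAIMED);

	/* Updates the cooked counter, timestamps and queued events. */
	(void) drm_handle_vblank(dev, 0);	/* crtc/pipe 0 */

	return (DDI_INTR_CLAIMED);
}
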
diff --git a/usr/src/uts/common/io/drm/drm_kstat.c b/usr/src/uts/common/io/drm/drm_kstat.c
index 23e51fe..c286780 100644
--- a/usr/src/uts/common/io/drm/drm_kstat.c
+++ b/usr/src/uts/common/io/drm/drm_kstat.c
@@ -1,30 +1,30 @@
/*
- * CDDL HEADER START
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
+
/*
- * Copyright 2008 Sun Microsystems, Inc.
- * All rights reserved. Use is subject to license terms.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
#include <sys/kstat.h>
#include <sys/ddi.h>
@@ -43,7 +43,7 @@ static char *drmkstat_name[] = {
static int
drm_kstat_update(kstat_t *ksp, int flag)
{
- drm_device_t *sc;
+ struct drm_device *sc;
kstat_named_t *knp;
int tmp;
@@ -61,7 +61,7 @@ drm_kstat_update(kstat_t *ksp, int flag)
}
int
-drm_init_kstats(drm_device_t *sc)
+drm_init_kstats(struct drm_device *sc)
{
int instance;
kstat_t *ksp;
@@ -69,7 +69,7 @@ drm_init_kstats(drm_device_t *sc)
char *np;
char **aknp;
- instance = ddi_get_instance(sc->dip);
+ instance = ddi_get_instance(sc->devinfo);
aknp = drmkstat_name;
ksp = kstat_create("drm", instance, "drminfo", "drm",
KSTAT_TYPE_NAMED, sizeof (drmkstat_name)/sizeof (char *) - 1,
@@ -90,7 +90,7 @@ drm_init_kstats(drm_device_t *sc)
}
void
-drm_fini_kstats(drm_device_t *sc)
+drm_fini_kstats(struct drm_device *sc)
{
if (sc->asoft_ksp)
kstat_delete(sc->asoft_ksp);
diff --git a/usr/src/uts/common/io/drm/drm_linux.c b/usr/src/uts/common/io/drm/drm_linux.c
new file mode 100644
index 0000000..4f1bc6d
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_linux.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drm_linux.h"
+
+void
+kref_init(struct kref *kref)
+{
+ atomic_set(&kref->refcount, 1);
+}
+
+void
+kref_get(struct kref *kref)
+{
+ atomic_inc(&kref->refcount);
+}
+
+void
+kref_put(struct kref *kref, void (*release)(struct kref *kref))
+{
+ if (!atomic_dec_uint_nv(&kref->refcount))
+ release(kref);
+}
+
+unsigned int
+hweight16(unsigned int w)
+{
+ w = (w & 0x5555) + ((w >> 1) & 0x5555);
+ w = (w & 0x3333) + ((w >> 2) & 0x3333);
+ w = (w & 0x0F0F) + ((w >> 4) & 0x0F0F);
+ w = (w & 0x00FF) + ((w >> 8) & 0x00FF);
+ return (w);
+}
+
+long
+IS_ERR(const void *ptr)
+{
+ return ((unsigned long)ptr >= (unsigned long)-255);
+}
+
+#ifdef NEVER
+/*
+ * kfree wrapper to Solaris VM.
+ */
+void
+kfree(void *buf, size_t size) {
+}
+#endif
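
An assumed usage sketch for the kref helpers in this new file; struct my_obj and my_obj_release() are illustrative, and the release callback relies on the kref being the first member rather than on a container_of() macro.

struct my_obj {
	struct kref refcount;	/* first member, so the cast below is valid */
	int payload;
};

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = (struct my_obj *)kref;

	kfree(obj, sizeof (*obj));
}

static void my_obj_example(struct my_obj *obj)
{
	kref_get(&obj->refcount);			/* extra reference */
	/* ... use obj ... */
	kref_put(&obj->refcount, my_obj_release);	/* may call release */
}
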
diff --git a/usr/src/uts/common/io/drm/drm_linux_list.h b/usr/src/uts/common/io/drm/drm_linux_list.h
deleted file mode 100644
index 02a4809..0000000
--- a/usr/src/uts/common/io/drm/drm_linux_list.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * drm_linux_list.h -- linux list functions for the BSDs.
- * Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
- */
-/*
- * -
- * Copyright 2003 Eric Anholt
- * Copyright (c) 2009, Intel Corporation.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <anholt@FreeBSD.org>
- *
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _DRM_LINUX_LIST_H_
-#define _DRM_LINUX_LIST_H_
-
-struct list_head {
- struct list_head *next, *prev;
- caddr_t contain_ptr;
-};
-
-/* Cheat, assume the list_head is at the start of the struct */
-#define list_entry(entry, type, member) (type *)(uintptr_t)(entry->contain_ptr)
-
-#define INIT_LIST_HEAD(head) { \
- (head)->next = head; \
- (head)->prev = head; \
- (head)->contain_ptr = (caddr_t)head; \
-}
-
-#define list_add(entry, head, con_ptr) { \
- (head)->next->prev = entry; \
- (entry)->next = (head)->next; \
- (entry)->prev = head; \
- (head)->next = entry; \
- (entry)->contain_ptr = con_ptr; \
-}
-
-#define list_add_tail(entry, head, con_ptr) { \
- (entry)->prev = (head)->prev; \
- (entry)->next = head; \
- (head)->prev->next = entry; \
- (head)->prev = entry; \
- (entry)->contain_ptr = con_ptr; \
-}
-
-#define list_del(entry) { \
- (entry)->next->prev = (entry)->prev; \
- (entry)->prev->next = (entry)->next; \
- (entry)->contain_ptr = NULL; \
-}
-
-#define list_for_each(entry, head) \
- for (entry = (head)->next; entry != head; entry = (entry)->next)
-
-#define list_for_each_safe(entry, temp, head) \
- for (entry = (head)->next, temp = (entry)->next; \
- entry != head; \
- entry = temp, temp = temp->next)
-
-#define list_del_init(entry) { \
- list_del(entry); \
- INIT_LIST_HEAD(entry); \
-}
-
-#define list_move_tail(entry, head, con_ptr) { \
- list_del(entry); \
- list_add_tail(entry, head, con_ptr); \
-}
-
-#define list_empty(head) ((head)->next == head)
-
-#endif /* _DRM_LINUX_LIST_H_ */
diff --git a/usr/src/uts/common/io/drm/drm_lock.c b/usr/src/uts/common/io/drm/drm_lock.c
index 6930a47..1e9f83f 100644
--- a/usr/src/uts/common/io/drm/drm_lock.c
+++ b/usr/src/uts/common/io/drm/drm_lock.c
@@ -1,13 +1,22 @@
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*/
/*
- * lock.c -- IOCTLs for locking -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
*/
+
/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -30,17 +39,130 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Lock ioctl.
*
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
*
+ * Add the current task to the lock wait queue, and attempt to take to lock.
*/
+/* LINTED */
+int drm_lock(DRM_IOCTL_ARGS)
+{
+ struct drm_lock *lock = data;
+ struct drm_master *master = file->master;
+ int ret = 0;
-#include "drmP.h"
+ ++file->lock_count;
+
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ DRM_CURRENTPID, lock->context);
+ return -EINVAL;
+ }
+
+ if (master->lock.hw_lock == NULL)
+ return -EINVAL;
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock->context, DRM_CURRENTPID,
+ master->lock.hw_lock->lock, lock->flags);
+
+ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
+ if (lock->context < 0)
+ return -EINVAL;
+
+ mutex_enter(&master->lock.lock_mutex);
+ master->lock.user_waiters++;
+ for (;;) {
+ if (drm_lock_take(&master->lock, lock->context)) {
+ master->lock.file_priv = file;
+ master->lock.lock_time = ddi_get_lbolt();
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+ ret = cv_wait_sig(&master->lock.lock_cv,
+ &master->lock.lock_mutex);
+ if (ret == 0) {
+ ret = -EINTR;
+ break;
+ }
+ }
+ master->lock.user_waiters--;
+ mutex_exit(&master->lock.lock_mutex);
+
+ DRM_DEBUG("%d %s\n", lock->context,
+ ret ? "interrupted" : "has lock");
+ if (ret) return ret;
+
+ if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+ {
+ if (dev->driver->dma_quiescent(dev)) {
+ DRM_DEBUG("%d waiting for DMA quiescent\n",
+ lock->context);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
-int
-drm_lock_take(drm_lock_data_t *lock_data, unsigned int context)
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
+/* LINTED */
+int drm_unlock(DRM_IOCTL_ARGS)
+{
+ struct drm_lock *lock = data;
+ struct drm_master *master = file->master;
+
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ DRM_CURRENTPID, lock->context);
+ return -EINVAL;
+ }
+
+ atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+ /* kernel_context_switch isn't used by any of the x86 drm
+ * modules but is required by the Sparc driver.
+ */
+ /* LINTED */
+ if (drm_lock_free(&master->lock, lock->context)) {
+ /* FIXME: Should really bail out here. */
+ }
+
+ return 0;
+}
+
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+int drm_lock_take(struct drm_lock_data *lock_data,
+ unsigned int context)
{
unsigned int old, new;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -49,142 +171,148 @@ drm_lock_take(drm_lock_data_t *lock_data, unsigned int context)
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
- else
- new = context | _DRM_LOCK_HELD;
+ else {
+ new = context | _DRM_LOCK_HELD |
+ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+ _DRM_LOCK_CONT : 0);
+ }
} while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
- context);
+ context);
}
- return (0);
+ return 0;
}
}
- if (new == (context | _DRM_LOCK_HELD)) {
+
+ if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
- return (1);
+ return 1;
}
- return (0);
+ return 0;
}
-/*
+/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
-int
-drm_lock_transfer(drm_device_t *dev, drm_lock_data_t *lock_data,
- unsigned int context)
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+ unsigned int context)
{
unsigned int old, new;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
- dev->lock.filp = NULL;
+ lock_data->file_priv = NULL;
do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
-
- return (1);
+ return 1;
}
-int
-drm_lock_free(drm_device_t *dev, volatile unsigned int *lock,
- unsigned int context)
+/**
+ * Free lock.
+ *
+ * \param dev DRM device.
+ * \param lock lock.
+ * \param context context.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
unsigned int old, new;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ mutex_enter(&lock_data->lock_mutex);
+ if (lock_data->kernel_waiters != 0) {
+ (void) drm_lock_transfer(lock_data, 0);
+ lock_data->idle_has_lock = 1;
+ mutex_exit(&lock_data->lock_mutex);
+ return 1;
+ }
- mutex_enter(&(dev->lock.lock_mutex));
- dev->lock.filp = NULL;
do {
- old = *lock;
- new = 0;
+ old = *lock;
+ new = _DRM_LOCKING_CONTEXT(old);
} while (!atomic_cmpset_int(lock, old, new));
- if (_DRM_LOCK_IS_HELD(old) &&
- (_DRM_LOCKING_CONTEXT(old) != context)) {
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- mutex_exit(&(dev->lock.lock_mutex));
- return (1);
+ context, _DRM_LOCKING_CONTEXT(old));
+ mutex_exit(&lock_data->lock_mutex);
+ return 1;
}
- cv_broadcast(&(dev->lock.lock_cv));
- mutex_exit(&(dev->lock.lock_mutex));
- return (0);
+ cv_broadcast(&lock_data->lock_cv);
+ mutex_exit(&lock_data->lock_mutex);
+ return 0;
}
-/*ARGSUSED*/
-int
-drm_lock(DRM_IOCTL_ARGS)
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
{
- DRM_DEVICE;
- drm_lock_t lock;
int ret = 0;
- DRM_COPYFROM_WITH_RETURN(&lock, (void *)data, sizeof (lock));
-
- if (lock.context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- DRM_CURRENTPID, lock.context);
- return (EINVAL);
- }
+ mutex_enter(&lock_data->lock_mutex);
+ lock_data->kernel_waiters++;
+ if (!lock_data->idle_has_lock) {
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
- lock.flags);
- if (dev->driver->use_dma_queue && lock.context < 0)
- return (EINVAL);
+ ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- mutex_enter(&(dev->lock.lock_mutex));
- for (;;) {
- if (drm_lock_take(&dev->lock, lock.context)) {
- dev->lock.filp = fpriv;
- dev->lock.lock_time = ddi_get_lbolt();
- break; /* Got lock */
- }
- ret = cv_wait_sig(&(dev->lock.lock_cv),
- &(dev->lock.lock_mutex));
-
- if (ret == 0) {
- mutex_exit(&(dev->lock.lock_mutex));
- return (EINTR);
- }
+ if (ret == 1)
+ lock_data->idle_has_lock = 1;
}
- mutex_exit(&(dev->lock.lock_mutex));
- DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-
- if (dev->driver->dma_quiescent != NULL &&
- (lock.flags & _DRM_LOCK_QUIESCENT))
- dev->driver->dma_quiescent(dev);
-
- return (0);
+ mutex_exit(&(lock_data->lock_mutex));
}
-/*ARGSUSED*/
-int
-drm_unlock(DRM_IOCTL_ARGS)
+void drm_idlelock_release(struct drm_lock_data *lock_data)
{
- DRM_DEVICE;
- drm_lock_t lock;
-
- DRM_COPYFROM_WITH_RETURN(&lock, (void *)data, sizeof (lock));
-
- DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
- lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
- lock.flags);
+ unsigned int old;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
- if (lock.context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- DRM_CURRENTPID, lock.context);
- return (EINVAL);
+ mutex_enter(&lock_data->lock_mutex);
+ if (--lock_data->kernel_waiters == 0) {
+ if (lock_data->idle_has_lock) {
+ do {
+ old = *lock;
+ } while (!atomic_cmpset_int(lock, old, DRM_KERNEL_CONTEXT));
+ cv_broadcast(&lock_data->lock_cv);
+ lock_data->idle_has_lock = 0;
+ }
}
- atomic_inc_32(&dev->counts[_DRM_STAT_UNLOCKS]);
+ mutex_exit(&lock_data->lock_mutex);
+}
- DRM_LOCK();
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock, lock.context)) {
- DRM_ERROR("drm_unlock\n");
- }
- DRM_UNLOCK();
- return (0);
+/* LINTED */
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_master *master = file_priv->master;
+ return (file_priv->lock_count && master->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+ master->lock.file_priv == file_priv);
}
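A sketch of how the reworked lock API fits together (not part of the patch; the helper name and the master argument are assumptions): a driver path that needs the GPU idle takes the idlelock around its quiescent call rather than contending for the heavyweight lock itself.

	static int
	my_wait_for_gpu_idle(struct drm_device *dev, struct drm_master *master)
	{
		int ret = 0;

		drm_idlelock_take(&master->lock);	/* kernel context gets the lock next */
		if (dev->driver->dma_quiescent)
			ret = dev->driver->dma_quiescent(dev);
		drm_idlelock_release(&master->lock);	/* hand the lock back to user waiters */

		return (ret);
	}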
diff --git a/usr/src/uts/common/io/drm/drm_memory.c b/usr/src/uts/common/io/drm/drm_memory.c
index cf2d5f6..2caa401 100644
--- a/usr/src/uts/common/io/drm/drm_memory.c
+++ b/usr/src/uts/common/io/drm/drm_memory.c
@@ -1,6 +1,5 @@
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*/
/*
@@ -10,7 +9,7 @@
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2012, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,44 +38,40 @@
*/
#include "drmP.h"
+#ifdef __x86
+#include "drm_linux_list.h"
+#endif
/* Device memory access structure */
+
typedef struct drm_device_iomap {
- uint_t physical; /* physical address */
- uint_t size; /* size of mapping */
- uint_t drm_regnum; /* register number */
- caddr_t drm_base; /* kernel virtual address */
- ddi_acc_handle_t drm_handle; /* data access handle */
+ uint_t paddr; /* physical address */
+ uint_t size; /* size of mapping */
+ caddr_t kvaddr; /* kernel virtual address */
+ ddi_acc_handle_t acc_handle; /* data access handle */
} drm_device_iomap_t;
-void
-drm_mem_init(void)
-{
-}
-
-void
-drm_mem_uninit(void)
-{
-}
-
-/*ARGSUSED*/
void *
drm_alloc(size_t size, int area)
{
+ _NOTE(ARGUNUSED(area))
+
return (kmem_zalloc(1 * size, KM_NOSLEEP));
}
-/*ARGSUSED*/
void *
drm_calloc(size_t nmemb, size_t size, int area)
{
+ _NOTE(ARGUNUSED(area))
+
return (kmem_zalloc(size * nmemb, KM_NOSLEEP));
}
-/*ARGSUSED*/
void *
drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
{
+ _NOTE(ARGUNUSED(area))
+
void *pt;
pt = kmem_zalloc(1 * size, KM_NOSLEEP);
@@ -85,137 +80,192 @@ drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
return (NULL);
}
if (oldpt && oldsize) {
- bcopy(pt, oldpt, oldsize);
+ bcopy(oldpt, pt, min(oldsize, size));
kmem_free(oldpt, oldsize);
}
return (pt);
}
-/*ARGSUSED*/
void
drm_free(void *pt, size_t size, int area)
{
- kmem_free(pt, size);
+ _NOTE(ARGUNUSED(area))
+
+ if (pt)
+ kmem_free(pt, size);
}
-/*ARGSUSED*/
int
-drm_get_pci_index_reg(dev_info_t *devi, uint_t physical, uint_t size,
- off_t *off)
+drm_get_pci_index_reg(dev_info_t *dip, uint_t paddr, uint_t size, off_t *off)
{
- int length;
- pci_regspec_t *regs;
- int n_reg, i;
- int regnum;
- uint_t base, regsize;
+ _NOTE(ARGUNUSED(size))
+
+ pci_regspec_t *regs = NULL;
+ int len;
+ uint_t regbase, regsize;
+ int nregs, i;
+ int regnum;
regnum = -1;
- if (ddi_dev_nregs(devi, &n_reg) == DDI_FAILURE) {
- DRM_ERROR("drm_get_pci_index_reg:ddi_dev_nregs failed\n");
- n_reg = 0;
+ if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE) {
+ DRM_ERROR("ddi_dev_nregs() failed");
return (-1);
}
- if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
- "assigned-addresses", (caddr_t)&regs, &length) !=
- DDI_PROP_SUCCESS) {
- DRM_ERROR("drm_get_pci_index_reg: ddi_getlongprop failed!\n");
- goto error;
+ if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "assigned-addresses", (caddr_t)&regs, &len) != DDI_PROP_SUCCESS) {
+ DRM_ERROR("ddi_getlongprop() failed");
+ if (regs)
+ kmem_free(regs, (size_t)len);
+ return (-1);
}
- for (i = 0; i < n_reg; i ++) {
- base = (uint_t)regs[i].pci_phys_low;
+ for (i = 0; i < nregs; i ++) {
+ regbase = (uint_t)regs[i].pci_phys_low;
regsize = (uint_t)regs[i].pci_size_low;
- if ((uint_t)physical >= base &&
- (uint_t)physical < (base + regsize)) {
+ if ((uint_t)paddr >= regbase &&
+ (uint_t)paddr < (regbase + regsize)) {
regnum = i + 1;
- *off = (off_t)(physical - base);
+ *off = (off_t)(paddr - regbase);
break;
}
}
- kmem_free(regs, (size_t)length);
+ if (regs)
+ kmem_free(regs, (size_t)len);
return (regnum);
-error:
- kmem_free(regs, (size_t)length);
- return (-1);
}
/* data access attributes structure for register access */
static ddi_device_acc_attr_t dev_attr = {
DDI_DEVICE_ATTR_V0,
+#ifdef _BIG_ENDIAN
+ DDI_STRUCTURE_LE_ACC,
+#else
DDI_NEVERSWAP_ACC,
+#endif
DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
};
-int
-do_ioremap(dev_info_t *devi, drm_device_iomap_t *iomap)
+static int
+__ioremap(dev_info_t *dip, drm_device_iomap_t *iomap)
{
- int regnum;
off_t offset;
+ int regnum;
int ret;
- regnum = drm_get_pci_index_reg(devi, iomap->physical,
- iomap->size, &offset);
+ regnum = drm_get_pci_index_reg(
+ dip, iomap->paddr, iomap->size, &offset);
if (regnum < 0) {
- DRM_ERROR("do_ioremap: can not find regster entry,"
- " start=0x%x, size=0x%x", iomap->physical, iomap->size);
- return (ENXIO);
+ DRM_ERROR("can not find register entry: "
+ "paddr=0x%x, size=0x%x", iomap->paddr, iomap->size);
+ return -ENXIO;
}
- iomap->drm_regnum = regnum;
-
- ret = ddi_regs_map_setup(devi, iomap->drm_regnum,
- (caddr_t *)&(iomap->drm_base), (offset_t)offset,
- (offset_t)iomap->size, &dev_attr, &iomap->drm_handle);
- if (ret < 0) {
- DRM_ERROR("do_ioremap: failed to map regs: regno=%d,"
- " offset=0x%x", regnum, offset);
- iomap->drm_handle = NULL;
- return (EFAULT);
+ ret = ddi_regs_map_setup(dip, regnum,
+ (caddr_t *)&(iomap->kvaddr), (offset_t)offset,
+ (offset_t)iomap->size, &dev_attr, &iomap->acc_handle);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("failed to map regs: "
+ "regnum=%d, offset=0x%lx", regnum, offset);
+ iomap->acc_handle = NULL;
+ return -EFAULT;
}
- return (0);
+ return 0;
}
int
-drm_ioremap(drm_device_t *softstate, drm_local_map_t *map)
+drm_ioremap(struct drm_device *dev, struct drm_local_map *map)
{
- drm_device_iomap_t iomap;
+ struct drm_device_iomap iomap;
int ret;
- DRM_DEBUG("drm_ioremap called\n");
+ DRM_DEBUG("\n");
- bzero(&iomap, sizeof (drm_device_iomap_t));
- iomap.physical = map->offset;
+ iomap.paddr = map->offset;
iomap.size = map->size;
- ret = do_ioremap(softstate->dip, &iomap);
+ ret = __ioremap(dev->devinfo, &iomap);
if (ret) {
- DRM_ERROR("drm_ioremap: failed, physaddr=0x%x, size=0x%x",
+ DRM_ERROR("__ioremap failed: paddr=0x%lx, size=0x%lx",
map->offset, map->size);
return (ret);
}
- /* ddi_acc_handle_t */
- map->dev_handle = iomap.drm_handle;
- map->handle = (void *)iomap.drm_base;
- map->dev_addr = iomap.drm_base;
+ map->handle = (void *)iomap.kvaddr;
+ map->acc_handle = iomap.acc_handle;
DRM_DEBUG(
- "map->handle is %p map->dev_addr is %lx map->size %x",
- (void *)map->handle, (unsigned long)map->dev_addr, map->size);
+ "map->handle=%p, map->size=%lx",
+ (void *)map->handle, map->size);
return (0);
}
void
-drm_ioremapfree(drm_local_map_t *map)
+drm_ioremapfree(struct drm_local_map *map)
{
- if (map->dev_handle == NULL) {
- DRM_ERROR("drm_ioremapfree: handle is NULL");
- return;
+ if (map->acc_handle)
+ ddi_regs_map_free(&map->acc_handle);
+}
+
+#ifdef __x86
+struct drm_iomem {
+ void *addr;
+ size_t size;
+ struct list_head head;
+};
+
+struct list_head drm_iomem_list;
+
+void *
+drm_sun_ioremap(uint64_t paddr, size_t size, uint32_t mode)
+{
+ struct drm_iomem *iomem;
+ void *addr;
+
+ if (mode == DRM_MEM_CACHED)
+ mode = GFXP_MEMORY_CACHED;
+ else if (mode == DRM_MEM_UNCACHED)
+ mode = GFXP_MEMORY_UNCACHED;
+ else if (mode == DRM_MEM_WC)
+ mode = GFXP_MEMORY_WRITECOMBINED;
+ else
+ return (NULL);
+
+ addr = (void *)gfxp_alloc_kernel_space(size);
+ if (!addr)
+ return (NULL);
+ gfxp_load_kernel_space(paddr, size, mode, addr);
+ iomem = kmem_zalloc(sizeof(*iomem), KM_NOSLEEP);
+ if (!iomem) {
+ gfxp_unmap_kernel_space(addr, size);
+ return (NULL);
+ }
+ iomem->addr = addr;
+ iomem->size = size;
+
+ INIT_LIST_HEAD(&iomem->head);
+ list_add(&iomem->head, &drm_iomem_list, (caddr_t)iomem);
+
+ return (addr);
+}
+
+void
+drm_sun_iounmap(void *addr)
+{
+ struct drm_iomem *iomem;
+
+ list_for_each_entry(iomem, struct drm_iomem, &drm_iomem_list, head) {
+ if (iomem->addr == addr) {
+ gfxp_unmap_kernel_space(addr, iomem->size);
+ list_del(&iomem->head);
+ kmem_free(iomem, sizeof(*iomem));
+ break;
+ }
}
- ddi_regs_map_free(&map->dev_handle);
}
+#endif /* x86 */
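For illustration (not part of the patch): drm_sun_ioremap() records every mapping on the global drm_iomem_list so that drm_sun_iounmap() can recover the size from the virtual address alone. A hypothetical paired use, with made-up physical address and size:

	void *regs = drm_sun_ioremap(0xd0000000ULL, 0x100000, DRM_MEM_WC);
	if (regs != NULL) {
		/* ... write-combined access to the aperture through 'regs' ... */
		drm_sun_iounmap(regs);	/* finds the entry by address, unmaps and frees it */
	}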
diff --git a/usr/src/uts/common/io/drm/drm_mm.c b/usr/src/uts/common/io/drm/drm_mm.c
index d2d70c4..4fe27ff 100644
--- a/usr/src/uts/common/io/drm/drm_mm.c
+++ b/usr/src/uts/common/io/drm/drm_mm.c
@@ -1,18 +1,22 @@
/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009, 2013, Intel Corporation.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files(the
+ * copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
- * The above copyright notice and this permission notice(including the
+ * The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
@@ -25,312 +29,791 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
- */
+ **************************************************************************/
/*
* Generic simple memory manager implementation. Intended to be used as a base
* class implementation for more advanced memory managers.
*
* Note that the algorithm used is quite simple and there might be substantial
- * performance gains if a smarter free list is implemented.
- * Currently it is just an
+ * performance gains if a smarter free list is implemented. Currently it is just an
* unordered stack of free regions. This could easily be improved if an RB-tree
* is used instead. At least if we expect heavy fragmentation.
*
* Aligned allocations can also see improvement.
*
* Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
+#include "drm_mm.h"
+#define MM_UNUSED_TARGET 4
-unsigned long
-drm_mm_tail_space(struct drm_mm *mm)
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_mm_node *child;
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return (0);
+ if (atomic)
+ child = kzalloc(sizeof(*child), GFP_ATOMIC);
+ else
+ child = kzalloc(sizeof(*child), GFP_KERNEL);
+
+ if (unlikely(child == NULL)) {
+ spin_lock(&mm->unused_lock);
+ if (list_empty(&mm->unused_nodes))
+ child = NULL;
+ else {
+ child =
+ list_entry(mm->unused_nodes.next,
+ struct drm_mm_node, node_list);
+ list_del(&child->node_list);
+ --mm->num_unused;
+ }
+ spin_unlock(&mm->unused_lock);
+ }
+ return child;
+}
- return (entry->size);
+/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
+ * drm_mm: memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+ struct drm_mm_node *node;
+
+ spin_lock(&mm->unused_lock);
+ while (mm->num_unused < MM_UNUSED_TARGET) {
+ spin_unlock(&mm->unused_lock);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ spin_lock(&mm->unused_lock);
+
+ if (unlikely(node == NULL)) {
+ int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+ spin_unlock(&mm->unused_lock);
+ return ret;
+ }
+ ++mm->num_unused;
+ list_add_tail(&node->node_list, &mm->unused_nodes, (caddr_t)node);
+ }
+ spin_unlock(&mm->unused_lock);
+ return 0;
}
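A hedged usage sketch (the manager and the hole node come from driver context and are assumptions here): a caller that must allocate from atomic context first calls drm_mm_pre_get() from a sleepable context, so that the later atomic allocation can be satisfied from the unused-node cache.

	static struct drm_mm_node *
	my_atomic_alloc(struct drm_mm *mm, struct drm_mm_node *hole)
	{
		if (drm_mm_pre_get(mm))			/* may sleep; tops up unused_nodes */
			return (NULL);
		return (drm_mm_get_block_atomic(hole, 4096, 0));
	}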
-int
-drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color)
{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
+
+ BUG_ON(node->allocated);
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp)
+ adj_start += alignment - tmp;
+ }
+
+ if (adj_start == hole_start) {
+ hole_node->hole_follows = 0;
+ list_del(&hole_node->hole_stack);
+ }
+
+ node->start = adj_start;
+ node->size = size;
+ node->mm = mm;
+ node->color = color;
+ node->allocated = 1;
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return (ENOMEM);
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list, (caddr_t)node);
- if (entry->size <= size)
- return (ENOMEM);
+ BUG_ON(node->start + node->size > adj_end);
- entry->size -= size;
- return (0);
+ node->hole_follows = 0;
+ if (__drm_mm_hole_node_start(node) < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
+ node->hole_follows = 1;
+ }
}
+struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+ unsigned long start,
+ unsigned long size,
+ bool atomic)
+{
+ struct drm_mm_node *hole, *node;
+ unsigned long end = start + size;
+ unsigned long hole_start;
+ unsigned long hole_end;
+
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ if (hole_start > start || hole_end < end)
+ continue;
+
+ node = drm_mm_kmalloc(mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ node->start = start;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole->node_list, (caddr_t)node);
+
+ if (start == hole_start) {
+ hole->hole_follows = 0;
+ list_del_init(&hole->hole_stack);
+ }
+
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
+ node->hole_follows = 1;
+ }
+
+ return node;
+ }
+
+ DRM_ERROR("no hole found for block 0x%lx + 0x%lx\n", start, size);
+ return NULL;
+}
-static int
-drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size)
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ int atomic)
{
- struct drm_mm_node *child;
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
- child = (struct drm_mm_node *)
- drm_alloc(sizeof (*child), DRM_MEM_MM);
- if (!child)
- return (ENOMEM);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
+ return node;
+}
+
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color)
+{
+ struct drm_mm_node *hole_node;
- list_add_tail(&child->ml_entry, &mm->ml_entry, (caddr_t)child);
- list_add_tail(&child->fl_entry, &mm->fl_entry, (caddr_t)child);
+ hole_node = drm_mm_search_free_generic(mm, size, alignment,
+ color, 0);
+ if (!hole_node)
+ return -ENOSPC;
- return (0);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
+ return 0;
}
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
-int
-drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color,
+ unsigned long start, unsigned long end)
{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
+
+ BUG_ON(!hole_node->hole_follows || node->allocated);
+
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp)
+ adj_start += alignment - tmp;
+ }
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free) {
- return (drm_mm_create_tail_node(mm,
- entry->start + entry->size, size));
+ if (adj_start == hole_start) {
+ hole_node->hole_follows = 0;
+ list_del(&hole_node->hole_stack);
+ }
+
+ node->start = adj_start;
+ node->size = size;
+ node->mm = mm;
+ node->color = color;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list, (caddr_t)node);
+
+ BUG_ON(node->start + node->size > adj_end);
+ BUG_ON(node->start + node->size > end);
+
+ node->hole_follows = 0;
+ if (__drm_mm_hole_node_start(node) < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
+ node->hole_follows = 1;
}
- entry->size += size;
- return (0);
}
-static struct drm_mm_node *
-drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size)
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
{
- struct drm_mm_node *child;
+ struct drm_mm_node *node;
- child = (struct drm_mm_node *)
- drm_alloc(sizeof (*child), DRM_MEM_MM);
- if (!child)
- return (NULL);
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
- INIT_LIST_HEAD(&child->fl_entry);
+ drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
+ start, end);
- child->free = 0;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
+ return node;
+}
- list_add_tail(&child->ml_entry, &parent->ml_entry, (caddr_t)child);
- INIT_LIST_HEAD(&child->fl_entry);
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
+ */
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment, unsigned long color,
+ unsigned long start, unsigned long end)
+{
+ struct drm_mm_node *hole_node;
+
+ hole_node = drm_mm_search_free_in_range_generic(mm,
+ size, alignment, color,
+ start, end, 0);
+ if (!hole_node)
+ return -ENOSPC;
+
+ drm_mm_insert_helper_range(hole_node, node,
+ size, alignment, color,
+ start, end);
+ return 0;
+}
- parent->size -= size;
- parent->start += size;
- return (child);
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Remove a memory node from the allocator.
*/
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ BUG_ON(node->scanned_block || node->scanned_prev_free
+ || node->scanned_next_free);
+
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+ if (node->hole_follows) {
+ BUG_ON(__drm_mm_hole_node_start(node) ==
+ __drm_mm_hole_node_end(node));
+ list_del(&node->hole_stack);
+ /* LINTED */
+ } else
+ BUG_ON(__drm_mm_hole_node_start(node) !=
+ __drm_mm_hole_node_end(node));
+
-void
-drm_mm_put_block(struct drm_mm_node *cur)
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack, (caddr_t)prev_node);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack, (caddr_t)prev_node);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
+}
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
{
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->ml_entry;
- struct list_head *root_head = &mm->ml_entry;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ struct drm_mm *mm = node->mm;
- int merged = 0;
+ drm_mm_remove_node(node);
- if (cur_head->prev != root_head) {
- prev_node = list_entry(cur_head->prev,
- struct drm_mm_node, ml_entry);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node = list_entry(cur_head->next,
- struct drm_mm_node, ml_entry);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->ml_entry);
- list_del(&next_node->fl_entry);
- drm_free(next_node,
- sizeof (*next_node), DRM_MEM_MM);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->fl_entry, &mm->fl_entry, (caddr_t)cur);
- } else {
- list_del(&cur->ml_entry);
- drm_free(cur, sizeof (*cur), DRM_MEM_MM);
+ spin_lock(&mm->unused_lock);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&node->node_list, &mm->unused_nodes, (caddr_t)node);
+ ++mm->num_unused;
+ } else
+ kfree(node, sizeof(struct drm_mm_node));
+ spin_unlock(&mm->unused_lock);
+}
+
+static int check_free_hole(unsigned long start, unsigned long end,
+ unsigned long size, unsigned alignment)
+{
+ if (end - start < size)
+ return 0;
+
+ if (alignment) {
+ unsigned tmp = start % alignment;
+ if (tmp)
+ start += alignment - tmp;
}
+
+ return end >= start + size;
}
-struct drm_mm_node *
-drm_mm_get_block(struct drm_mm_node *parent,
- unsigned long size,
- unsigned alignment)
+struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match)
{
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
+ unsigned long best_size;
- struct drm_mm_node *align_splitoff = NULL;
- struct drm_mm_node *child;
- unsigned tmp = 0;
+ BUG_ON(mm->scanned_blocks);
- if (alignment)
- tmp = parent->start % alignment;
+ best = NULL;
+ best_size = ~0UL;
- if (tmp) {
- align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
- if (!align_splitoff)
- return (NULL);
- }
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
- if (parent->size == size) {
- list_del_init(&parent->fl_entry);
- parent->free = 0;
- return (parent);
- } else {
- child = drm_mm_split_at_start(parent, size);
- }
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
+ continue;
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
- return (child);
+ return best;
}
-struct drm_mm_node *
-drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- int best_match)
+struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match)
{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
+ unsigned long adj_start;
+ unsigned long adj_end;
unsigned long best_size;
- unsigned wasted;
+
+ BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
+ drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ if (adj_start < start)
+ adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
- if (entry->size < size)
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (alignment) {
- register unsigned tmp = entry->start % alignment;
- if (tmp)
- wasted += alignment - tmp;
+ if (!best_match)
+ return entry;
+
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
}
+ }
+ return best;
+}
- if (entry->size >= size + wasted) {
- if (!best_match)
- return (entry);
- if (size < best_size) {
- best = entry;
- best_size = entry->size;
- }
- }
+/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->hole_stack, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+ new->color = old->color;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color)
+{
+ mm->scan_color = color;
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_end = 0;
+ mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
+}
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_color = color;
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_end = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
+}
+
+/**
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+ * Returns non-zero if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
+ unsigned long adj_start, adj_end;
+
+ mm->scanned_blocks++;
+
+ BUG_ON(node->scanned_block);
+ node->scanned_block = 1;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+
+ adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+ adj_end = hole_end = drm_mm_hole_node_end(prev_node);
+
+ if (mm->scan_check_range) {
+ if (adj_start < mm->scan_start)
+ adj_start = mm->scan_start;
+ if (adj_end > mm->scan_end)
+ adj_end = mm->scan_end;
+ }
+
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color,
+ &adj_start, &adj_end);
+
+ if (check_free_hole(adj_start, adj_end,
+ mm->scan_size, mm->scan_alignment)) {
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_end = hole_end;
+ return 1;
}
- return (best);
+ return 0;
}
-int
-drm_mm_clean(struct drm_mm *mm)
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed in the exact same order from the scan list as they
+ * have been added, otherwise the internal state of the memory manager will be
+ * corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then return
+ * the just freed block (because it's at the top of the free_stack list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
- struct list_head *head = &mm->ml_entry;
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
- return (head->next->next == head);
+ mm->scanned_blocks--;
+
+ BUG_ON(!node->scanned_block);
+ node->scanned_block = 0;
+
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ list_add(&node->node_list, &prev_node->node_list, (caddr_t)node);
+
+ return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+ node->start < mm->scan_hit_end);
}
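A rough sketch of the two-phase eviction scan this API implies (the object type, LRU list and eviction step are hypothetical driver-side details, and the unwind must honour the ordering rule stated in the comment above):

	/* Hypothetical object embedding a drm_mm_node and sitting on a driver LRU. */
	struct my_obj {
		struct drm_mm_node	mm_node;
		struct list_head	lru_link;
	};

	static int
	my_evict_something(struct drm_mm *mm, struct list_head *lru_list,
	    unsigned long size, unsigned alignment)
	{
		struct my_obj *obj;
		int found = 0;

		drm_mm_init_scan(mm, size, alignment, 0);

		/* Phase 1: feed LRU candidates to the scanner until a hole appears. */
		list_for_each_entry(obj, struct my_obj, lru_list, lru_link) {
			if (drm_mm_scan_add_block(&obj->mm_node)) {
				found = 1;
				break;
			}
		}

		/*
		 * Phase 2 (not shown): unwind with drm_mm_scan_remove_block(),
		 * evict every node it flags, then place the new allocation in
		 * the freed hole.
		 */
		return (found ? 0 : -ENOSPC);
	}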
-int
-drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
+
+int drm_mm_clean(struct drm_mm * mm)
{
- INIT_LIST_HEAD(&mm->ml_entry);
- INIT_LIST_HEAD(&mm->fl_entry);
+ struct list_head *head = &mm->head_node.node_list;
- return (drm_mm_create_tail_node(mm, start, size));
+ return (head->next->next == head);
}
-void
-drm_mm_takedown(struct drm_mm *mm)
+void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
- struct list_head *bnode = mm->fl_entry.next;
- struct drm_mm_node *entry;
+ INIT_LIST_HEAD(&mm->hole_stack);
+ INIT_LIST_HEAD(&mm->unused_nodes);
+ mm->num_unused = 0;
+ mm->scanned_blocks = 0;
+ spin_lock_init(&mm->unused_lock);
+
+ /* Clever trick to avoid a special case in the free hole tracking. */
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ mm->head_node.node_list.contain_ptr = (caddr_t)&mm->head_node;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack, (caddr_t)&mm->head_node);
+ mm->color_adjust = NULL;
+}
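The "clever trick" above is worth unpacking with concrete numbers (derived directly from the code): the sentinel head_node sits at the end of the managed range with a size that wraps around, so the hole that "follows" it spans exactly the whole range and an empty manager needs no special case.

	/*
	 * drm_mm_init(mm, 0x1000, 0x4000):
	 *   head_node.start = 0x1000 + 0x4000        = 0x5000
	 *   head_node.size  = 0x1000 - 0x5000        = (unsigned long)-0x4000
	 *   hole start      = head_node.start + size = 0x1000
	 *   hole end        = next node's start (head_node itself, circular) = 0x5000
	 * i.e. the initial hole covers [0x1000, 0x5000), the full managed range.
	 */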
+
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+void drm_mm_takedown(struct drm_mm * mm)
+{
+ struct drm_mm_node *entry, *next;
- if (entry->ml_entry.next != &mm->ml_entry ||
- entry->fl_entry.next != &mm->fl_entry) {
+ if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
+ spin_lock(&mm->unused_lock);
+ list_for_each_entry_safe(entry, next, struct drm_mm_node, &mm->unused_nodes, node_list) {
+ list_del(&entry->node_list);
+ kfree(entry, sizeof(struct drm_mm_node));
+ --mm->num_unused;
+ }
+ spin_unlock(&mm->unused_lock);
- drm_free(entry, sizeof (*entry), DRM_MEM_MM);
+ BUG_ON(mm->num_unused != 0);
}
-void
-drm_mm_clean_ml(const struct drm_mm *mm)
+static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
+ const char *prefix)
{
- const struct list_head *mlstack = &mm->ml_entry;
- struct list_head *list, *temp;
- struct drm_mm_node *entry;
+ unsigned long hole_start, hole_end, hole_size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ DRM_DEBUG("%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ return hole_size;
+ }
- if (mlstack->next == NULL)
- return;
+ return 0;
+}
- list_for_each_safe(list, temp, mlstack) {
- entry = list_entry(list, struct drm_mm_node, ml_entry);
- DRM_DEBUG("ml_entry 0x%x, size 0x%x, start 0x%x",
- entry, entry->size, entry->start);
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+ struct drm_mm_node *entry;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+
+ total_free += drm_mm_debug_hole(&mm->head_node, prefix);
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
- drm_free(entry, sizeof (*entry), DRM_MEM_MM);
+ drm_mm_for_each_node(entry, struct drm_mm_node, mm) {
+ DRM_DEBUG("%s 0x%08lx-0x%08lx: %8lu: used\n",
+ prefix, entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+ total_free += drm_mm_debug_hole(entry, prefix);
}
+ total = total_free + total_used;
+
+ DRM_DEBUG("%s total: %lu, used %lu free %lu\n", prefix, total,
+ total_used, total_free);
+}
+
+bool drm_mm_initialized(struct drm_mm *mm)
+{
+ return (mm->hole_stack.next != NULL);
+}
+
+unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ return hole_node->start + hole_node->size;
+}
+
+unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ BUG_ON(!hole_node->hole_follows);
+ return __drm_mm_hole_node_start(hole_node);
+}
+
+unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ struct drm_mm_node *node;
+ node = list_entry(hole_node->node_list.next,
+ struct drm_mm_node, node_list);
+ return node->start;
+}
+
+unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ return __drm_mm_hole_node_end(hole_node);
+}
+
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+ return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
+}
+struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
+{
+ return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
+}
+struct drm_mm_node *drm_mm_get_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment, 0,
+ start, end, 0);
+}
+struct drm_mm_node *drm_mm_get_block_atomic_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment, 0,
+ start, end, 1);
+}
+
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ bool best_match)
+{
+ return drm_mm_search_free_generic(mm, size, alignment, 0, best_match);
+}
+struct drm_mm_node *drm_mm_search_free_in_range(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ bool best_match)
+{
+ return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
+ start, end, best_match);
}
diff --git a/usr/src/uts/common/io/drm/drm_modes.c b/usr/src/uts/common/io/drm/drm_modes.c
new file mode 100644
index 0000000..10f843e
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_modes.c
@@ -0,0 +1,1140 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ *
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright (c) 2007-2008, 2013, Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_linux_list.h"
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @dev: DRM device
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+ "0x%x 0x%x\n",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+
+/**
+ * drm_cvt_mode - create a modeline based on CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether the GTF calculation is simplified
+ * @interlaced: whether interlacing is supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * return the modeline based on CVT algorithm
+ *
+ * This function is called to generate the modeline based on CVT algorithm
+ * according to the hdisplay, vdisplay, vrefresh.
+ * It is based on the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge April 9, 2003 available at
+ * http://www.vesa.org/public/CVT/CVTd6r1.xls
+ *
+ * It is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c,
+ * translated to use integer calculations.
+ */
+#define HV_FACTOR 1000
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+ int vdisplay, int vrefresh,
+ bool reduced, bool interlaced, bool margins)
+{
+ /* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define CVT_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define CVT_H_GRANULARITY 8
+ /* 3) Minimum vertical porch (lines) - default 3 */
+#define CVT_MIN_V_PORCH 3
+ /* 4) Minimum number of vertical back porch lines - default 6 */
+#define CVT_MIN_V_BPORCH 6
+ /* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP 250
+ struct drm_display_mode *drm_mode;
+ unsigned int vfieldrate, hperiod;
+ int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+ int interlace;
+
+ /* allocate the drm_display_mode structure; if allocation fails,
+ * return directly
+ */
+ drm_mode = drm_mode_create(dev);
+ if (!drm_mode)
+ return NULL;
+
+ /* the CVT default refresh rate is 60Hz */
+ if (!vrefresh)
+ vrefresh = 60;
+
+ /* the required field refresh rate */
+ if (interlaced)
+ vfieldrate = vrefresh * 2;
+ else
+ vfieldrate = vrefresh;
+
+ /* horizontal pixels */
+ hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+ /* determine the left&right borders */
+ hmargin = 0;
+ if (margins) {
+ hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+ hmargin -= hmargin % CVT_H_GRANULARITY;
+ }
+ /* find the total active pixels */
+ drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+ /* find the number of lines per field */
+ if (interlaced)
+ vdisplay_rnd = vdisplay / 2;
+ else
+ vdisplay_rnd = vdisplay;
+
+ /* find the top & bottom borders */
+ vmargin = 0;
+ if (margins)
+ vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+ drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+ /* Interlaced */
+ if (interlaced)
+ interlace = 1;
+ else
+ interlace = 0;
+
+ /* Determine VSync Width from aspect ratio */
+ if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+ vsync = 4;
+ else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+ vsync = 5;
+ else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+ vsync = 6;
+ else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+ vsync = 7;
+ else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+ vsync = 7;
+ else /* custom */
+ vsync = 10;
+
+ if (!reduced) {
+ /* simplify the GTF calculation */
+ /* 4) Minimum time of vertical sync + back porch interval (µs)
+ * default 550.0
+ */
+ int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP 550
+ /* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE 8
+ unsigned int hblank_percentage;
+ /* LINTED */
+ int vsyncandback_porch, vback_porch, hblank;
+
+ /* estimated the horizontal period */
+ tmp1 = HV_FACTOR * 1000000 -
+ CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+ tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+ interlace;
+ hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+ tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+ /* 9. Find number of lines in sync + backporch */
+ if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+ vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+ else
+ vsyncandback_porch = tmp1;
+ /* 10. Find number of lines in back porch */
+ vback_porch = vsyncandback_porch - vsync;
+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+ vsyncandback_porch + CVT_MIN_V_PORCH;
+ /* 5) Definition of Horizontal blanking time limitation */
+ /* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR 600
+ /* Offset (%) - default 40 */
+#define CVT_C_FACTOR 40
+ /* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR 128
+ /* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR 20
+#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+ CVT_J_FACTOR)
+ /* 12. Find ideal blanking duty cycle from formula */
+ hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+ hperiod / 1000;
+ /* 13. Blanking time */
+ if (hblank_percentage < 20 * HV_FACTOR)
+ hblank_percentage = 20 * HV_FACTOR;
+ hblank = drm_mode->hdisplay * hblank_percentage /
+ (100 * HV_FACTOR - hblank_percentage);
+ hblank -= hblank % (2 * CVT_H_GRANULARITY);
+ /* 14. find the total pixels per line */
+ drm_mode->htotal = drm_mode->hdisplay + hblank;
+ drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+ drm_mode->hsync_start = drm_mode->hsync_end -
+ (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+ drm_mode->hsync_start += CVT_H_GRANULARITY -
+ drm_mode->hsync_start % CVT_H_GRANULARITY;
+ /* fill the Vsync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ } else {
+ /* Reduced blanking */
+ /* Minimum vertical blanking interval time (µs)- default 460 */
+#define CVT_RB_MIN_VBLANK 460
+ /* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC 32
+ /* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK 160
+ /* Fixed number of lines for vertical front porch - default 3*/
+#define CVT_RB_VFPORCH 3
+ int vbilines;
+ int tmp1, tmp2;
+ /* 8. Estimate Horizontal period. */
+ tmp1 = HV_FACTOR * 1000000 -
+ CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+ tmp2 = vdisplay_rnd + 2 * vmargin;
+ hperiod = tmp1 / (tmp2 * vfieldrate);
+ /* 9. Find number of lines in vertical blanking */
+ vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+ /* 10. Check if vertical blanking is sufficient */
+ if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+ vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+ /* 11. Find total number of lines in vertical field */
+ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+ /* 12. Find total number of pixels in a line */
+ drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+ /* Fill in HSync values */
+ drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+ drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+ /* Fill in VSync values */
+ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+ drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+ }
+ /* 15/13. Find pixel clock frequency (kHz for xf86) */
+ drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+ drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+ /* 18/16. Find actual vertical frame frequency */
+ /* ignore - just set the mode flag for interlaced */
+ if (interlaced) {
+ drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+ /* Fill the mode line name */
+ drm_mode_set_name(drm_mode);
+ if (reduced)
+ drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ else
+ drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NHSYNC);
+
+ return drm_mode;
+}
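+
+/*
+ * Usage sketch (illustrative, assuming a valid struct drm_device *dev):
+ * request a 1920x1080 CVT mode at 60Hz with reduced blanking and no
+ * interlacing or margins. The argument order matches the call in
+ * drm_mode_create_from_cmdline_mode() below.
+ *
+ *	struct drm_display_mode *mode;
+ *
+ *	mode = drm_cvt_mode(dev, 1920, 1080, 60, true, false, false);
+ *	if (mode != NULL)
+ *		DRM_DEBUG_KMS("clock %d kHz, htotal %d, vtotal %d\n",
+ *		    mode->clock, mode->htotal, mode->vtotal);
+ */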
+
+/**
+ * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether an interlaced mode is requested
+ * @margins: whether margins are requested
+ * @GTF_[MCKJ]: extended GTF formula parameters
+ *
+ * LOCKING:
+ * None.
+ *
+ * Returns the modeline based on the GTF algorithm.
+ *
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
+ */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{	/* 1) top/bottom margin size (% of height) - default: 1.8% */
+#define GTF_MARGIN_PERCENTAGE 18
+ /* 2) character cell horizontal granularity (pixels) - default 8 */
+#define GTF_CELL_GRAN 8
+ /* 3) Minimum vertical porch (lines) - default 3 */
+#define GTF_MIN_V_PORCH 1
+ /* width of vsync in lines */
+#define V_SYNC_RQD 3
+ /* width of hsync as % of total line */
+#define H_SYNC_PERCENT 8
+ /* min time of vsync + back porch (microsec) */
+#define MIN_VSYNC_PLUS_BP 550
+ /* C' and M' are part of the Blanking Duty Cycle computation */
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+ struct drm_display_mode *drm_mode;
+ unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
+ int top_margin, bottom_margin;
+ int interlace;
+ unsigned int hfreq_est;
+ /* LINTED */
+ int vsync_plus_bp, vback_porch;
+ /* LINTED */
+ unsigned int vtotal_lines, vfieldrate_est, hperiod;
+ /* LINTED */
+ unsigned int vfield_rate, vframe_rate;
+ int left_margin, right_margin;
+ unsigned int total_active_pixels, ideal_duty_cycle;
+ unsigned int hblank, total_pixels, pixel_freq;
+ int hsync, hfront_porch, vodd_front_porch_lines;
+ unsigned int tmp1, tmp2;
+
+ drm_mode = drm_mode_create(dev);
+ if (!drm_mode)
+ return NULL;
+
+ /* 1. In order to give correct results, the number of horizontal
+ * pixels requested is first processed to ensure that it is divisible
+ * by the character size, by rounding it to the nearest character
+ * cell boundary:
+ */
+ hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+ hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
+
+ /* 2. If interlace is requested, the number of vertical lines assumed
+ * by the calculation must be halved, as the computation calculates
+ * the number of vertical lines per field.
+ */
+ if (interlaced)
+ vdisplay_rnd = vdisplay / 2;
+ else
+ vdisplay_rnd = vdisplay;
+
+ /* 3. Find the frame rate required: */
+ if (interlaced)
+ vfieldrate_rqd = vrefresh * 2;
+ else
+ vfieldrate_rqd = vrefresh;
+
+ /* 4. Find number of lines in Top margin: */
+ top_margin = 0;
+ if (margins)
+ top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+ 1000;
+ /* 5. Find number of lines in bottom margin: */
+ bottom_margin = top_margin;
+
+ /* 6. If interlace is required, then set variable interlace: */
+ if (interlaced)
+ interlace = 1;
+ else
+ interlace = 0;
+
+ /* 7. Estimate the Horizontal frequency */
+ {
+ tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
+ tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
+ 2 + interlace;
+ hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
+ }
+
+ /* 8. Find the number of lines in V sync + back porch */
+ /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
+ vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
+ vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
+ /* 9. Find the number of lines in V back porch alone: */
+ vback_porch = vsync_plus_bp - V_SYNC_RQD;
+ /* 10. Find the total number of lines in Vertical field period: */
+ vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
+ vsync_plus_bp + GTF_MIN_V_PORCH;
+ /* 11. Estimate the Vertical field frequency: */
+ vfieldrate_est = hfreq_est / vtotal_lines;
+ /* 12. Find the actual horizontal period: */
+ hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
+
+ /* 13. Find the actual Vertical field frequency: */
+ vfield_rate = hfreq_est / vtotal_lines;
+ /* 14. Find the Vertical frame frequency: */
+ if (interlaced)
+ vframe_rate = vfield_rate / 2;
+ else
+ vframe_rate = vfield_rate;
+ /* 15. Find number of pixels in left margin: */
+ if (margins)
+ left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+ 1000;
+ else
+ left_margin = 0;
+
+ /* 16.Find number of pixels in right margin: */
+ right_margin = left_margin;
+ /* 17.Find total number of active pixels in image and left and right */
+ total_active_pixels = hdisplay_rnd + left_margin + right_margin;
+ /* 18.Find the ideal blanking duty cycle from blanking duty cycle */
+ ideal_duty_cycle = GTF_C_PRIME * 1000 -
+ (GTF_M_PRIME * 1000000 / hfreq_est);
+ /* 19.Find the number of pixels in the blanking time to the nearest
+ * double character cell: */
+ hblank = total_active_pixels * ideal_duty_cycle /
+ (100000 - ideal_duty_cycle);
+ hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
+ hblank = hblank * 2 * GTF_CELL_GRAN;
+ /* 20.Find total number of pixels: */
+ total_pixels = total_active_pixels + hblank;
+ /* 21.Find pixel clock frequency: */
+ pixel_freq = total_pixels * hfreq_est / 1000;
+ /* Stage 1 computations are now complete; I should really pass
+ * the results to another function and do the Stage 2 computations,
+ * but I only need a few more values so I'll just append the
+ * computations here for now */
+ /* 17. Find the number of pixels in the horizontal sync period: */
+ hsync = H_SYNC_PERCENT * total_pixels / 100;
+ hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+ hsync = hsync * GTF_CELL_GRAN;
+ /* 18. Find the number of pixels in horizontal front porch period */
+ hfront_porch = hblank / 2 - hsync;
+ /* 36. Find the number of lines in the odd front porch period: */
+	vodd_front_porch_lines = GTF_MIN_V_PORCH;
+
+ /* finally, pack the results in the mode struct */
+ drm_mode->hdisplay = hdisplay_rnd;
+ drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
+ drm_mode->hsync_end = drm_mode->hsync_start + hsync;
+ drm_mode->htotal = total_pixels;
+ drm_mode->vdisplay = vdisplay_rnd;
+ drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
+ drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
+ drm_mode->vtotal = vtotal_lines;
+
+ drm_mode->clock = pixel_freq;
+
+ if (interlaced) {
+ drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
+ return drm_mode;
+}
+
+/**
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether an interlaced mode is requested
+ * @margins: whether margins are requested
+ *
+ * LOCKING:
+ * None.
+ *
+ * Returns the modeline based on the GTF algorithm.
+ *
+ * This function creates a modeline using the Generalized Timing Formula,
+ * which is derived from:
+ *	GTF Spreadsheet by Andy Morrish (1/5/97)
+ *	available at http://www.vesa.org
+ *
+ * The code is adapted from xserver/hw/xfree86/modes/xf86gtf.c, translated
+ * to integer arithmetic, and also draws on fb_get_mode() in
+ * drivers/video/fbmon.c.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
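+
+/*
+ * Usage sketch (illustrative): the two calls below build the same
+ * 1024x768@75Hz mode, since drm_gtf_mode() simply plugs in the standard
+ * GTF parameters; note that C and J are passed to the _complex variant
+ * already doubled (C=40 becomes 80, J=20 becomes 40).
+ *
+ *	m1 = drm_gtf_mode(dev, 1024, 768, 75, false, 0);
+ *	m2 = drm_gtf_mode_complex(dev, 1024, 768, 75, false, 0,
+ *	    600, 80, 128, 40);
+ */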
+
+#ifdef CONFIG_VIDEOMODE_HELPERS
+int drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode)
+{
+ dmode->hdisplay = vm->hactive;
+ dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
+ dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
+ dmode->htotal = dmode->hsync_end + vm->hback_porch;
+
+ dmode->vdisplay = vm->vactive;
+ dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
+ dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
+ dmode->vtotal = dmode->vsync_end + vm->vback_porch;
+
+ dmode->clock = vm->pixelclock / 1000;
+
+ dmode->flags = 0;
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ dmode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
+ dmode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ dmode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
+ dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+ if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
+ dmode->flags |= DRM_MODE_FLAG_DBLCLK;
+ drm_mode_set_name(dmode);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+/**
+ * of_get_drm_display_mode - get a drm_display_mode from devicetree
+ * @np: device_node with the timing specification
+ * @dmode: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * This function is expensive and should only be used if a single mode is to
+ * be read from DT. To get multiple modes, start with of_get_display_timings
+ * and work with that instead.
+ */
+int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, int index)
+{
+ struct videomode vm;
+ int ret;
+
+ ret = of_get_videomode(np, &vm, index);
+ if (ret)
+ return ret;
+
+ drm_display_mode_from_videomode(&vm, dmode);
+
+ pr_debug("%s: got %dx%d display mode from %s\n",
+ of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ drm_mode_debug_printmodeline(dmode);
+
+ return 0;
+}
+#endif /* CONFIG_OF */
+#endif /* CONFIG_VIDEOMODE_HELPERS */
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ (void) snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
+}
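+
+/*
+ * For example, a 1280x1024 progressive mode is named "1280x1024", while a
+ * 1920x1080 mode with DRM_MODE_FLAG_INTERLACE set is named "1920x1080i".
+ */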
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: destination list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+	struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, head) {
+ list_move_tail(entry, new, (caddr_t)entry);
+ }
+}
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(const struct drm_display_mode *mode)
+{
+	return mode->hdisplay;
+}
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(const struct drm_display_mode *mode)
+{
+ return mode->vdisplay;
+}
+
+/**
+ * drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest integer.
+ */
+int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+ unsigned int calc_val;
+
+ if (mode->hsync)
+ return mode->hsync;
+
+	if (mode->htotal <= 0)
+ return 0;
+
+ calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+ calc_val += 500; /* round to 1000Hz */
+ calc_val /= 1000; /* truncate to kHz */
+
+ return calc_val;
+}
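+
+/*
+ * Worked example (illustrative): for a mode with hsync unset, clock =
+ * 148500 kHz and htotal = 2200 (typical 1920x1080@60 timings), calc_val
+ * starts at 148500000 / 2200 = 67500 Hz, becomes 68000 after the +500
+ * rounding step, and the function returns 68 kHz.
+ */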
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+ *
+ * FIXME: why is this needed? shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate, rounded to the nearest integer (the exact value
+ * plus 0.5, truncated). For example, 70.288 returns 70Hz and 59.6 returns
+ * 60Hz.
+ */
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
+{
+ int refresh = 0;
+ unsigned int calc_val;
+
+ if (mode->vrefresh > 0)
+ refresh = mode->vrefresh;
+ else if (mode->htotal > 0 && mode->vtotal > 0) {
+ int vtotal;
+ vtotal = mode->vtotal;
+ /* work out vrefresh the value will be x1000 */
+ calc_val = (mode->clock * 1000);
+ calc_val /= mode->htotal;
+ refresh = (calc_val + vtotal / 2) / vtotal;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ refresh *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ refresh /= 2;
+ if (mode->vscan > 1)
+ refresh /= mode->vscan;
+ }
+ return refresh;
+}
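+
+/*
+ * Worked example (illustrative): for a progressive mode with vrefresh
+ * unset, clock = 148500 kHz, htotal = 2200 and vtotal = 1125 (typical
+ * 1920x1080@60 timings), calc_val = 148500000 / 2200 = 67500 and
+ * refresh = (67500 + 562) / 1125 = 60 Hz.
+ */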
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: unused? (FIXME)
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+ return;
+
+ p->crtc_hdisplay = p->hdisplay;
+ p->crtc_hsync_start = p->hsync_start;
+ p->crtc_hsync_end = p->hsync_end;
+ p->crtc_htotal = p->htotal;
+ p->crtc_hskew = p->hskew;
+ p->crtc_vdisplay = p->vdisplay;
+ p->crtc_vsync_start = p->vsync_start;
+ p->crtc_vsync_end = p->vsync_end;
+ p->crtc_vtotal = p->vtotal;
+
+ if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+ p->crtc_vdisplay /= 2;
+ p->crtc_vsync_start /= 2;
+ p->crtc_vsync_end /= 2;
+ p->crtc_vtotal /= 2;
+ }
+ }
+
+ if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+ p->crtc_vdisplay *= 2;
+ p->crtc_vsync_start *= 2;
+ p->crtc_vsync_end *= 2;
+ p->crtc_vtotal *= 2;
+ }
+
+ if (p->vscan > 1) {
+ p->crtc_vdisplay *= p->vscan;
+ p->crtc_vsync_start *= p->vscan;
+ p->crtc_vsync_end *= p->vscan;
+ p->crtc_vtotal *= p->vscan;
+ }
+
+ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+}
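+
+/*
+ * For example (illustrative), passing CRTC_INTERLACE_HALVE_V for an
+ * interlaced mode with vdisplay = 1080 and vtotal = 1125 yields
+ * crtc_vdisplay = 540 and crtc_vtotal = 562 (integer division), while
+ * the horizontal crtc_* values remain plain copies of the mode values.
+ */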
+
+
+/**
+ * drm_mode_copy - copy the mode
+ * @dst: mode to overwrite
+ * @src: mode to copy
+ *
+ * LOCKING:
+ * None.
+ *
+ * Copy an existing mode into another mode, preserving the object id
+ * of the destination mode.
+ */
+void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
+{
+ int id = dst->base.id;
+ struct list_head head = dst->head;
+
+ *dst = *src;
+ dst->base.id = id;
+ dst->head = head;
+}
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @m: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it. Used to create new instances of established modes.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = drm_mode_create(dev);
+ if (!nmode)
+ return NULL;
+
+ drm_mode_copy(nmode, mode);
+
+ return nmode;
+}
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
+	/* do the clock check by converting to PICOS so fb modes get
+	 * matched the same */
+ if (mode1->clock && mode2->clock) {
+ if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+ return false;
+ } else if (mode1->clock != mode2->clock)
+ return false;
+
+ return drm_mode_equal_no_clocks(mode1, mode2);
+}
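+
+/*
+ * Clock comparison example (illustrative, assuming the usual definition
+ * KHZ2PICOS(a) = 1000000000UL / (a)): clocks of 65000 kHz and 65001 kHz
+ * both map to a 15384 ps pixel period, so two modes differing only by
+ * that 1 kHz still compare as equal, which is the point of the PICOS
+ * conversion.
+ */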
+
+/**
+ * drm_mode_equal_no_clocks - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent, but
+ * don't check the pixel clocks.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
+ if (mode1->hdisplay == mode2->hdisplay &&
+ mode1->hsync_start == mode2->hsync_start &&
+ mode1->hsync_end == mode2->hsync_end &&
+ mode1->htotal == mode2->htotal &&
+ mode1->hskew == mode2->hskew &&
+ mode1->vdisplay == mode2->vdisplay &&
+ mode1->vsync_start == mode2->vsync_start &&
+ mode1->vsync_end == mode2->vsync_end &&
+ mode1->vtotal == mode2->vtotal &&
+ mode1->vscan == mode2->vscan &&
+ mode1->flags == mode2->flags)
+ return true;
+
+ return false;
+}
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits. Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+/* LINTED */
+void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, struct drm_display_mode, mode_list, head) {
+ if (maxPitch > 0 && mode->hdisplay > maxPitch)
+ mode->status = MODE_BAD_WIDTH;
+
+ if (maxX > 0 && mode->hdisplay > maxX)
+ mode->status = MODE_VIRTUAL_X;
+
+ if (maxY > 0 && mode->vdisplay > maxY)
+ mode->status = MODE_VIRTUAL_Y;
+ }
+}
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question. This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+/* LINTED */
+void drm_mode_validate_clocks(struct drm_device *dev,
+ struct list_head *mode_list,
+ int *min, int *max, int n_ranges)
+{
+ struct drm_display_mode *mode;
+ int i;
+
+ list_for_each_entry(mode, struct drm_display_mode, mode_list, head) {
+ bool good = false;
+ for (i = 0; i < n_ranges; i++) {
+ if (mode->clock >= min[i] && mode->clock <= max[i]) {
+ good = true;
+ break;
+ }
+ }
+ if (!good)
+ mode->status = MODE_CLOCK_RANGE;
+ }
+}
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list. If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose)
+{
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, struct drm_display_mode, mode_list, head) {
+ if (mode->status != MODE_OK) {
+ list_del(&mode->head);
+ if (verbose) {
+ drm_mode_debug_printmodeline(mode);
+ DRM_DEBUG_KMS("Not using %s mode %d\n",
+ mode->name, mode->status);
+ }
+ drm_mode_destroy(dev, mode);
+ }
+ }
+}
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+{
+ struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+ struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+ int diff;
+
+ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+ if (diff)
+ return diff;
+ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+ if (diff)
+ return diff;
+
+ diff = b->vrefresh - a->vrefresh;
+ if (diff)
+ return diff;
+
+ diff = b->clock - a->clock;
+ return diff;
+}
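+
+/*
+ * For example (illustrative), a 1280x1024 mode flagged
+ * DRM_MODE_TYPE_PREFERRED compares as better (negative return) than a
+ * non-preferred 1920x1200 mode, because the preferred bit is tested before
+ * resolution; among equally preferred modes, larger area wins, then higher
+ * vrefresh, then higher clock.
+ */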
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+ struct list_head *cur, *end;
+ struct list_head *list, *temp;
+ int ordered = 1;
+
+ if (list_empty(mode_list))
+ return;
+
+	/* Pre-check the mode order.
+	 * In most cases, the mode list is already ordered.
+	 */
+ for (list = mode_list->next, temp = list->next;
+ temp != mode_list;
+ list = temp, temp = temp->next) {
+		if (drm_mode_compare(list, temp) > 0) {
+ ordered = 0;
+ break;
+ }
+ }
+
+ if (ordered)
+ return;
+
+ end = mode_list->next;
+ cur = end->next;
+ while (cur != mode_list) {
+		list_for_each_safe(list, temp, mode_list) {
+			if (drm_mode_compare(list, cur) > 0) {
+				/* insert cur before this node */
+ struct list_head *p_node = list->prev;
+ list_del(cur);
+ p_node->next = cur;
+ list->prev = cur;
+ cur->next = list;
+ cur->prev = p_node;
+
+ cur = end->next;
+ break;
+ }
+ if (list == end) {
+ end = cur;
+ cur = end->next;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares the probed mode against the current
+ * list and only adds different modes. Any mode left unverified after this
+ * point will be removed by the invalid-mode pruning pass
+ * (drm_mode_prune_invalid()).
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ struct drm_display_mode *pmode, *pt;
+ int found_it;
+
+ list_for_each_entry_safe(pmode, pt, struct drm_display_mode,
+ &connector->probed_modes,
+ head) {
+ found_it = 0;
+ /* go through current modes checking for the new probed mode */
+ list_for_each_entry(mode, struct drm_display_mode, &connector->modes, head) {
+ if (drm_mode_equal(pmode, mode)) {
+ found_it = 1;
+ /* if equal delete the probed mode */
+ mode->status = pmode->status;
+ /* Merge type bits together */
+ mode->type |= pmode->type;
+ list_del(&pmode->head);
+ drm_mode_destroy(connector->dev, pmode);
+ break;
+ }
+ }
+
+ if (!found_it) {
+ list_move_tail(&pmode->head, &connector->modes, (caddr_t)pmode);
+ }
+ }
+}
+
+struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+{
+ struct drm_display_mode *mode;
+
+ if (cmd->cvt)
+ mode = drm_cvt_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->rb, cmd->interlace,
+ cmd->margins);
+ else
+ mode = drm_gtf_mode(dev,
+ cmd->xres, cmd->yres,
+ cmd->refresh_specified ? cmd->refresh : 60,
+ cmd->interlace,
+ cmd->margins);
+ if (!mode)
+ return NULL;
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ return mode;
+}
diff --git a/usr/src/uts/common/io/drm/drm_msg.c b/usr/src/uts/common/io/drm/drm_msg.c
index 120776c..691a77b 100644
--- a/usr/src/uts/common/io/drm/drm_msg.c
+++ b/usr/src/uts/common/io/drm/drm_msg.c
@@ -1,34 +1,47 @@
/*
- * CDDL HEADER START
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
/*
- * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include "drmP.h"
void
+drm_debug_print(int cmn_err, const char *func, int line, const char *fmt, ...)
+{
+ va_list ap;
+ char format[256];
+
+ (void) snprintf(format, sizeof (format), "[drm:%s:%d] %s",
+ func, line, fmt);
+
+ va_start(ap, fmt);
+ vcmn_err(cmn_err, format, ap);
+ va_end(ap);
+}
+
+void
drm_debug(const char *fmt, ...)
{
va_list ap;
diff --git a/usr/src/uts/common/io/drm/drm_pci.c b/usr/src/uts/common/io/drm/drm_pci.c
index 37a02a1..530ea35 100644
--- a/usr/src/uts/common/io/drm/drm_pci.c
+++ b/usr/src/uts/common/io/drm/drm_pci.c
@@ -1,9 +1,11 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
*/
-/* BEGIN CSTYLED */
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory functions.
@@ -37,91 +39,18 @@
/**********************************************************************/
/** \name PCI memory */
/*@{*/
-/* END CSTYLED */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
#include "drmP.h"
#include <vm/seg_kmem.h>
-#define PCI_DEVICE(x) (((x)>>11) & 0x1f)
-#define PCI_FUNCTION(x) (((x) & 0x700) >> 8)
-#define PCI_BUS(x) (((x) & 0xff0000) >> 16)
-
typedef struct drm_pci_resource {
- uint_t regnum;
+ uint_t regnum;
unsigned long offset;
unsigned long size;
} drm_pci_resource_t;
-int
-pci_get_info(drm_device_t *softstate, int *bus, int *slot, int *func)
-{
- int *regs_list;
- uint_t nregs = 0;
-
- if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, softstate->dip,
- DDI_PROP_DONTPASS, "reg", (int **)&regs_list, &nregs)
- != DDI_PROP_SUCCESS) {
- DRM_ERROR("pci_get_info: get pci function bus device failed");
- goto error;
- }
- *bus = (int)PCI_BUS(regs_list[0]);
- *slot = (int)PCI_DEVICE(regs_list[0]);
- *func = (int)PCI_FUNCTION(regs_list[0]);
-
- if (nregs > 0) {
- ddi_prop_free(regs_list);
- }
- return (DDI_SUCCESS);
-error:
- if (nregs > 0) {
- ddi_prop_free(regs_list);
- }
- return (DDI_FAILURE);
-}
-
-int
-pci_get_irq(drm_device_t *statep)
-{
- int irq;
-
- extern int drm_supp_get_irq(void *);
-
- irq = ddi_prop_get_int(DDI_DEV_T_ANY,
- statep->dip, DDI_PROP_DONTPASS, "interrupts", -1);
-
- if (irq > 0) {
- irq = drm_supp_get_irq(statep->drm_handle);
- }
-
- return (irq);
-}
-
-int
-pci_get_vendor(drm_device_t *statep)
-{
- int vendorid;
-
- vendorid = ddi_prop_get_int(DDI_DEV_T_ANY,
- statep->dip, DDI_PROP_DONTPASS, "vendor-id", 0);
-
- return (vendorid);
-}
-
-int
-pci_get_device(drm_device_t *statep)
-{
- int deviceid;
-
- deviceid = ddi_prop_get_int(DDI_DEV_T_ANY,
- statep->dip, DDI_PROP_DONTPASS, "device-id", 0);
-
- return (deviceid);
-}
-
void
-drm_core_ioremap(struct drm_local_map *map, drm_device_t *dev)
+drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
if ((map->type == _DRM_AGP) && dev->agp) {
/*
@@ -137,8 +66,8 @@ drm_core_ioremap(struct drm_local_map *map, drm_device_t *dev)
* After that, access to physical memory managed by agp gart
* hardware in kernel space doesn't go through agp hardware,
* it will be: kernel virtual ---> physical address.
- * Obviously, it is more efficient. But in solaris operating
- * system, the ioctl AGPIOC_ALLOCATE of apggart driver does
+ * Obviously, it is more efficient. But in Solaris operating
+ * system, the ioctl AGPIOC_ALLOCATE of agpgart driver does
* not return physical address. We are unable to create the
* direct mapping between kernel space and agp memory. So,
* we remove the calling to agp_remap().
@@ -150,10 +79,11 @@ drm_core_ioremap(struct drm_local_map *map, drm_device_t *dev)
}
}
-/*ARGSUSED*/
void
-drm_core_ioremapfree(struct drm_local_map *map, drm_device_t *dev)
+drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
+ _NOTE(ARGUNUSED(dev))
+
if (map->type != _DRM_AGP) {
if (map->handle && map->size)
drm_ioremapfree(map);
@@ -167,26 +97,6 @@ drm_core_ioremapfree(struct drm_local_map *map, drm_device_t *dev)
}
}
-struct drm_local_map *
-drm_core_findmap(drm_device_t *dev, unsigned long handle)
-{
- drm_local_map_t *map;
-
- DRM_SPINLOCK_ASSERT(&dev->dev_lock);
-
-/*
- * For the time being, we compare the low 32 bit only,
- * We will hash handle to 32-bit to solve this issue later.
- */
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if ((((unsigned long)map->handle) & 0x00000000ffffffff)
- == (handle & 0x00000000ffffffff))
- return (map);
- }
-
- return (NULL);
-}
-
/*
* pci_alloc_consistent()
*/
@@ -202,54 +112,50 @@ static ddi_dma_attr_t hw_dma_attr = {
0xffffffff, /* seg */
1, /* sgllen */
4, /* granular */
- 0 /* flags */
+ DDI_DMA_FLAGERR, /* flags */
};
static ddi_device_acc_attr_t hw_acc_attr = {
DDI_DEVICE_ATTR_V0,
DDI_NEVERSWAP_ACC,
- DDI_STRICTORDER_ACC
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
};
-
void *
-drm_pci_alloc(drm_device_t *dev, size_t size,
- size_t align, dma_addr_t maxaddr, int segments)
+drm_pci_alloc(struct drm_device *dev, size_t size,
+ size_t align, dma_addr_t maxaddr, int segments)
{
- drm_dma_handle_t *dmah;
- uint_t count;
- int ret = DDI_FAILURE;
+ struct drm_dma_handle *dmah;
+ uint_t count;
- /* allocat continous physical memory for hw status page */
- if (align == 0)
- hw_dma_attr.dma_attr_align = 1;
- else
- hw_dma_attr.dma_attr_align = align;
+	/* allocate contiguous physical memory for hw status page */
+ hw_dma_attr.dma_attr_align = (!align) ? 1 : align;
hw_dma_attr.dma_attr_addr_hi = maxaddr;
hw_dma_attr.dma_attr_sgllen = segments;
- dmah = kmem_zalloc(sizeof (drm_dma_handle_t), KM_SLEEP);
- if (ret = ddi_dma_alloc_handle(dev->dip, &hw_dma_attr,
- DDI_DMA_SLEEP, NULL, &dmah->dma_hdl)) {
- DRM_ERROR("drm_pci_alloc:ddi_dma_alloc_handle failed\n");
+ dmah = kmem_zalloc(sizeof(struct drm_dma_handle), KM_SLEEP);
+
+ if (ddi_dma_alloc_handle(dev->devinfo, &hw_dma_attr,
+ DDI_DMA_SLEEP, NULL, &dmah->dma_hdl) != DDI_SUCCESS) {
+ DRM_ERROR("ddi_dma_alloc_handle() failed");
goto err3;
}
- if (ret = ddi_dma_mem_alloc(dmah->dma_hdl, size, &hw_acc_attr,
+ if (ddi_dma_mem_alloc(dmah->dma_hdl, size, &hw_acc_attr,
DDI_DMA_CONSISTENT | IOMEM_DATA_UNCACHED,
DDI_DMA_SLEEP, NULL, (caddr_t *)&dmah->vaddr,
- &dmah->real_sz, &dmah->acc_hdl)) {
- DRM_ERROR("drm_pci_alloc: ddi_dma_mem_alloc failed\n");
+ &dmah->real_sz, &dmah->acc_hdl) != DDI_SUCCESS) {
+ DRM_ERROR("ddi_dma_mem_alloc() failed\n");
goto err2;
}
- ret = ddi_dma_addr_bind_handle(dmah->dma_hdl, NULL,
+ if (ddi_dma_addr_bind_handle(dmah->dma_hdl, NULL,
(caddr_t)dmah->vaddr, dmah->real_sz,
- DDI_DMA_RDWR|DDI_DMA_CONSISTENT,
- DDI_DMA_SLEEP, NULL, &dmah->cookie, &count);
- if (ret != DDI_DMA_MAPPED) {
- DRM_ERROR("drm_pci_alloc: alloc phys memory failed");
+ DDI_DMA_RDWR|DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
+ NULL, &dmah->cookie, &count) != DDI_DMA_MAPPED) {
+ DRM_ERROR("ddi_dma_addr_bind_handle() failed");
goto err1;
}
@@ -273,14 +179,11 @@ err3:
return (NULL);
}
-/*
- * pci_free_consistent()
- */
-/*ARGSUSED*/
void
-drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
+drm_pci_free(drm_dma_handle_t *dmah)
{
ASSERT(dmah != NULL);
+
(void) ddi_dma_unbind_handle(dmah->dma_hdl);
ddi_dma_mem_free(&dmah->acc_hdl);
ddi_dma_free_handle(&dmah->dma_hdl);
@@ -288,13 +191,13 @@ drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
}
int
-do_get_pci_res(drm_device_t *dev, drm_pci_resource_t *resp)
+do_get_pci_res(struct drm_device *dev, drm_pci_resource_t *resp)
{
int length;
pci_regspec_t *regs;
if (ddi_getlongprop(
- DDI_DEV_T_ANY, dev->dip, DDI_PROP_DONTPASS,
+ DDI_DEV_T_ANY, dev->devinfo, DDI_PROP_DONTPASS,
"assigned-addresses", (caddr_t)&regs, &length) !=
DDI_PROP_SUCCESS) {
DRM_ERROR("do_get_pci_res: ddi_getlongprop failed!\n");
@@ -309,16 +212,15 @@ do_get_pci_res(drm_device_t *dev, drm_pci_resource_t *resp)
return (0);
}
-/*ARGSUSED*/
unsigned long
-drm_get_resource_start(drm_device_t *softstate, unsigned int regnum)
+drm_get_resource_start(struct drm_device *dev, unsigned int regnum)
{
drm_pci_resource_t res;
int ret;
res.regnum = regnum;
- ret = do_get_pci_res(softstate, &res);
+ ret = do_get_pci_res(dev, &res);
if (ret != 0) {
DRM_ERROR("drm_get_resource_start: ioctl failed");
@@ -329,9 +231,8 @@ drm_get_resource_start(drm_device_t *softstate, unsigned int regnum)
}
-/*ARGSUSED*/
unsigned long
-drm_get_resource_len(drm_device_t *softstate, unsigned int regnum)
+drm_get_resource_len(struct drm_device *softstate, unsigned int regnum)
{
drm_pci_resource_t res;
int ret;
@@ -347,3 +248,4 @@ drm_get_resource_len(drm_device_t *softstate, unsigned int regnum)
return (res.size);
}
+
diff --git a/usr/src/uts/common/io/drm/drm_rect.c b/usr/src/uts/common/io/drm/drm_rect.c
new file mode 100644
index 0000000..2935f3f
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_rect.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_rect.h"
+
+/**
+ * drm_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * Calculate the intersection of rectangles @r1 and @r2.
+ * @r1 will be overwritten with the intersection.
+ *
+ * RETURNS:
+ * %true if rectangle @r1 is still visible after the operation,
+ * %false otherwise.
+ */
+int drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
+{
+ r1->x1 = max(r1->x1, r2->x1);
+ r1->y1 = max(r1->y1, r2->y1);
+ r1->x2 = min(r1->x2, r2->x2);
+ r1->y2 = min(r1->y2, r2->y2);
+
+ return drm_rect_visible(r1);
+}
+
+/**
+ * drm_rect_clip_scaled - perform a scaled clip operation
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @clip: clip rectangle
+ * @hscale: horizontal scaling factor
+ * @vscale: vertical scaling factor
+ *
+ * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
+ * same amounts multiplied by @hscale and @vscale.
+ *
+ * RETURNS:
+ * %true if rectangle @dst is still visible after being clipped,
+ * %false otherwise
+ */
+int drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+ const struct drm_rect *clip,
+ int hscale, int vscale)
+{
+ int diff;
+
+ diff = clip->x1 - dst->x1;
+ if (diff > 0) {
+ int64_t tmp = src->x1 + (int64_t) diff * hscale;
+ src->x1 = clamp_int64_t(tmp);
+ }
+ diff = clip->y1 - dst->y1;
+ if (diff > 0) {
+ int64_t tmp = src->y1 + (int64_t) diff * vscale;
+ src->y1 = clamp_int64_t(tmp);
+ }
+ diff = dst->x2 - clip->x2;
+ if (diff > 0) {
+ int64_t tmp = src->x2 - (int64_t) diff * hscale;
+ src->x2 = clamp_int64_t(tmp);
+ }
+ diff = dst->y2 - clip->y2;
+ if (diff > 0) {
+ int64_t tmp = src->y2 - (int64_t) diff * vscale;
+ src->y2 = clamp_int64_t(tmp);
+ }
+
+ return drm_rect_intersect(dst, clip);
+}
+
+static int drm_calc_scale(int src, int dst)
+{
+ int scale = 0;
+
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+
+ if (dst == 0)
+ return 0;
+
+ scale = src / dst;
+
+ return scale;
+}
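+
+/*
+ * Illustrative values: drm_calc_scale(1920, 640) = 3, while
+ * drm_calc_scale(1920, 1280) = 1 because the integer division truncates
+ * the exact factor of 1.5; negative inputs return -EINVAL and a zero
+ * destination returns 0.
+ */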
+
+/**
+ * drm_rect_calc_hscale - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * RETURNS:
+ * The horizontal scaling factor, or errno if out of limits.
+ */
+int drm_rect_calc_hscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_hscale, int max_hscale)
+{
+ int src_w = drm_rect_width(src);
+ int dst_w = drm_rect_width(dst);
+ int hscale = drm_calc_scale(src_w, dst_w);
+
+ if (hscale < 0 || dst_w == 0)
+ return hscale;
+
+ if (hscale < min_hscale || hscale > max_hscale)
+ return -ERANGE;
+
+ return hscale;
+}
+
+/**
+ * drm_rect_calc_vscale - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * RETURNS:
+ * The vertical scaling factor, or errno if out of limits.
+ */
+int drm_rect_calc_vscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_vscale, int max_vscale)
+{
+ int src_h = drm_rect_height(src);
+ int dst_h = drm_rect_height(dst);
+ int vscale = drm_calc_scale(src_h, dst_h);
+
+ if (vscale < 0 || dst_h == 0)
+ return vscale;
+
+ if (vscale < min_vscale || vscale > max_vscale)
+ return -ERANGE;
+
+ return vscale;
+}
+
+/**
+ * drm_rect_calc_hscale_relaxed - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * If the calculated scaling factor is below @min_hscale,
+ * decrease the width of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_hscale,
+ * decrease the width of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The horizontal scaling factor.
+ */
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_hscale, int max_hscale)
+{
+ int src_w = drm_rect_width(src);
+ int dst_w = drm_rect_width(dst);
+ int hscale = drm_calc_scale(src_w, dst_w);
+
+ if (hscale < 0 || dst_w == 0)
+ return hscale;
+
+ if (hscale < min_hscale) {
+ int max_dst_w = src_w / min_hscale;
+
+ drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
+
+ return min_hscale;
+ }
+
+ if (hscale > max_hscale) {
+ int max_src_w = dst_w * max_hscale;
+
+ drm_rect_adjust_size(src, max_src_w - src_w, 0);
+
+ return max_hscale;
+ }
+
+ return hscale;
+}
+
+/**
+ * drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * If the calculated scaling factor is below @min_vscale,
+ * decrease the height of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_vscale,
+ * decrease the height of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The vertical scaling factor.
+ */
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_vscale, int max_vscale)
+{
+ int src_h = drm_rect_height(src);
+ int dst_h = drm_rect_height(dst);
+ int vscale = drm_calc_scale(src_h, dst_h);
+
+ if (vscale < 0 || dst_h == 0)
+ return vscale;
+
+ if (vscale < min_vscale) {
+ int max_dst_h = src_h / min_vscale;
+
+ drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
+
+ return min_vscale;
+ }
+
+ if (vscale > max_vscale) {
+ int max_src_h = dst_h * max_vscale;
+
+ drm_rect_adjust_size(src, 0, max_src_h - src_h);
+
+ return max_vscale;
+ }
+
+ return vscale;
+}
+
+/**
+ * drm_rect_debug_print - print the rectangle information
+ * @r: rectangle to print
+ * @fixed_point: rectangle is in 16.16 fixed point format
+ */
+void drm_rect_debug_print(const struct drm_rect *r, int fixed_point)
+{
+ int w = drm_rect_width(r);
+ int h = drm_rect_height(r);
+
+ if (fixed_point)
+ DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
+ w >> 16, ((w & 0xffff) * 15625) >> 10,
+ h >> 16, ((h & 0xffff) * 15625) >> 10,
+ r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
+ r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+ else
+ DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
+}
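+
+/*
+ * Worked example (illustrative): in 16.16 fixed point a width of
+ * 0x00018000 is 1.5; w >> 16 gives the integer part 1 and
+ * ((0x8000 * 15625) >> 10) gives 500000, so the value prints as
+ * "1.500000" (15625 / 1024 is exactly 1000000 / 65536).
+ */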
diff --git a/usr/src/uts/common/io/drm/drm_scatter.c b/usr/src/uts/common/io/drm/drm_scatter.c
index b1d1076..50fc361 100644
--- a/usr/src/uts/common/io/drm/drm_scatter.c
+++ b/usr/src/uts/common/io/drm/drm_scatter.c
@@ -1,13 +1,21 @@
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
*/
-/* BEGIN CSTYLED */
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * \file drm_scatter.c
+ * IOCTLs to manage scatter/gather memory
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
-/* drm_scatter.h -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com */
-/*-
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
@@ -29,32 +37,16 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- * Eric Anholt <anholt@FreeBSD.org>
- *
*/
-/* END CSTYLED */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
#include "drmP.h"
-#include <gfx_private.h>
#include "drm_io32.h"
-#define DEBUG_SCATTER 0
+#define DEBUG_SCATTER 0
-#ifdef _LP64
-#define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-#define ScatterHandle(x) (unsigned int)(x)
-#endif
-
-void
-drm_sg_cleanup(drm_device_t *dev, drm_sg_mem_t *entry)
+void drm_sg_cleanup(struct drm_sg_mem *entry)
{
- int pages = entry->pages;
+ int pages = entry->pages;
if (entry->busaddr) {
kmem_free(entry->busaddr, sizeof (*entry->busaddr) * pages);
@@ -64,52 +56,46 @@ drm_sg_cleanup(drm_device_t *dev, drm_sg_mem_t *entry)
ASSERT(entry->umem_cookie == NULL);
if (entry->dmah_sg) {
- drm_pci_free(dev, entry->dmah_sg);
+ drm_pci_free(entry->dmah_sg);
entry->dmah_sg = NULL;
}
if (entry->dmah_gart) {
- drm_pci_free(dev, entry->dmah_gart);
+ drm_pci_free(entry->dmah_gart);
entry->dmah_gart = NULL;
}
- if (entry) {
- drm_free(entry, sizeof (drm_sg_mem_t), DRM_MEM_SGLISTS);
- entry = NULL;
- }
+ kfree(entry, sizeof (struct drm_sg_mem));
}
-/*ARGSUSED*/
-int
-drm_sg_alloc(DRM_IOCTL_ARGS)
+#ifdef _LP64
+#define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+#define ScatterHandle(x) (unsigned int)(x)
+#endif
+
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
{
- DRM_DEVICE;
+ struct drm_sg_mem *entry;
unsigned long pages;
- drm_sg_mem_t *entry;
- drm_dma_handle_t *dmah;
- drm_scatter_gather_t request;
+ drm_dma_handle_t *dmah;
- DRM_DEBUG("%s\n", "drm_sg_alloc");
+ DRM_DEBUG("\n");
+
+ if (!drm_core_check_feature(dev, DRIVER_SG))
+ return -EINVAL;
if (dev->sg)
- return (EINVAL);
+ return -EINVAL;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_scatter_gather_32_t request32;
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
- DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
- sizeof (request32));
- request.size = request32.size;
- request.handle = request32.handle;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
- sizeof (request));
+ (void) memset(entry, 0, sizeof(*entry));
+ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+ DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
- pages = btopr(request.size);
- DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages);
- entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
entry->pages = (int)pages;
dmah = drm_pci_alloc(dev, ptob(pages), 4096, 0xfffffffful, pages);
if (dmah == NULL)
@@ -119,68 +105,45 @@ drm_sg_alloc(DRM_IOCTL_ARGS)
entry->handle = ScatterHandle((unsigned long)dmah->vaddr);
entry->virtual = (void *)dmah->vaddr;
- request.handle = entry->handle;
+ request->handle = entry->handle;
entry->dmah_sg = dmah;
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_scatter_gather_32_t data32;
- data32.size = (uint32_t)request.size;
- data32.handle = (uint32_t)request.handle;
-
- DRM_COPYTO_WITH_RETURN((void *)data, &data32,
- sizeof (data32));
- } else
-#endif
- DRM_COPYTO_WITH_RETURN((void *)data, &request,
- sizeof (request));
-
- DRM_LOCK();
- if (dev->sg) {
- DRM_UNLOCK();
- drm_sg_cleanup(dev, entry);
- return (EINVAL);
- }
dev->sg = entry;
- DRM_UNLOCK();
- return (0);
+ return 0;
err_exit:
- drm_sg_cleanup(dev, entry);
- return (ENOMEM);
+ drm_sg_cleanup(entry);
+ return -ENOMEM;
}
-/*ARGSUSED*/
-int
-drm_sg_free(DRM_IOCTL_ARGS)
+/* LINTED */
+int drm_sg_alloc_ioctl(DRM_IOCTL_ARGS)
{
- DRM_DEVICE;
- drm_scatter_gather_t request;
- drm_sg_mem_t *entry;
-
-#ifdef _MULTI_DATAMODEL
- if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
- drm_scatter_gather_32_t request32;
-
- DRM_COPYFROM_WITH_RETURN(&request32, (void *)data,
- sizeof (request32));
- request.size = request32.size;
- request.handle = request32.handle;
- } else
-#endif
- DRM_COPYFROM_WITH_RETURN(&request, (void *)data,
- sizeof (request));
+ struct drm_scatter_gather *request = data;
+
+ return drm_sg_alloc(dev, request);
+
+}
+
+/* LINTED */
+int drm_sg_free(DRM_IOCTL_ARGS)
+{
+ struct drm_scatter_gather *request = data;
+ struct drm_sg_mem *entry;
+
+ if (!drm_core_check_feature(dev, DRIVER_SG))
+ return -EINVAL;
- DRM_LOCK();
entry = dev->sg;
dev->sg = NULL;
- DRM_UNLOCK();
- if (!entry || entry->handle != request.handle)
- return (EINVAL);
+ if (!entry || entry->handle != request->handle)
+ return -EINVAL;
+
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
- drm_sg_cleanup(dev, entry);
+ drm_sg_cleanup(entry);
- return (0);
+ return 0;
}
diff --git a/usr/src/uts/common/io/drm/drm_stub.c b/usr/src/uts/common/io/drm/drm_stub.c
index ec82b0a..26501ac 100644
--- a/usr/src/uts/common/io/drm/drm_stub.c
+++ b/usr/src/uts/common/io/drm/drm_stub.c
@@ -353,6 +353,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
err_g2:
+err_mem:
kfree(new_minor, sizeof (*new_minor));
err_idr:
(void) idr_remove(&drm_minors_idr, minor_id);
@@ -489,6 +490,8 @@ void drm_put_dev(struct drm_device *dev)
if (dev->driver->unload)
dev->driver->unload(dev);
+ gfxp_mempool_destroy();
+
if (drm_core_has_AGP(dev) && dev->agp) {
drm_agp_cleanup(dev);
kfree(dev->agp, sizeof(*dev->agp));
diff --git a/usr/src/uts/common/io/drm/drm_sun_i2c.c b/usr/src/uts/common/io/drm/drm_sun_i2c.c
new file mode 100644
index 0000000..21b58e7
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_i2c.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drmP.h"
+
+#define mutex_lock_nested(a,b) mutex_enter(a)
+
+#define I2C_HI_CLOCK(adap, ret) \
+do { \
+ ret = i2c_setscl(adap); \
+ if (ret) { \
+ /* Other master keeps the clock low. */ \
+ /* Free the bus. */ \
+ i2c_setsda(adap); \
+ i2c_udelay(adap); \
+ return ret; \
+ } \
+} while (*"\0");
+
+static inline void
+i2c_udelay(struct i2c_adapter *adap)
+{
+ udelay((adap->udelay + 1) >> 1);
+}
+
+static inline int
+i2c_getsda(struct i2c_adapter *adap)
+{
+ return adap->getsda(adap->data) ? 1 : 0;
+}
+
+static inline void
+i2c_clrsda(struct i2c_adapter *adap)
+{
+ adap->setsda(adap->data, 0);
+}
+
+static inline void
+i2c_setsda(struct i2c_adapter *adap)
+{
+ adap->setsda(adap->data, 1);
+}
+
+static inline int
+i2c_getscl(struct i2c_adapter *adap)
+{
+ return adap->getscl(adap->data) ? 1 : 0;
+}
+
+static inline void
+i2c_clrscl(struct i2c_adapter *adap)
+{
+ adap->setscl(adap->data, 0);
+}
+
+static int
+i2c_setscl(struct i2c_adapter *adap)
+{
+ clock_t start;
+
+ adap->setscl(adap->data, 1);
+
+ /* Clock Synchronization */
+ start = ddi_get_lbolt();
+ while (!i2c_getscl(adap)) {
+ /* FIXME: Does ddi_get_lbolt() return negative
+ * value? If so, leave me.
+ */
+ if ((ddi_get_lbolt() - start) > adap->timeout)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int
+i2c_start(struct i2c_adapter *adap)
+{
+ int ret = 0;
+
+ /* Step 1: free the bus. */
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+ ret = i2c_setscl(adap);
+ if (ret) {
+ /* Other master keeps the clock low.
+ * The bus is busy.
+ */
+ return ret;
+ }
+ if (!i2c_getsda(adap)) {
+ /* The bus is busy. */
+ return -EBUSY;
+ }
+ i2c_udelay(adap);
+
+ /* Step 2: (S/Sr) condition. */
+ i2c_clrsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 3: free the clock. */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+
+ return 0;
+}
+
+static int
+i2c_stop(struct i2c_adapter *adap)
+{
+ int ret;
+
+ if (i2c_getscl(adap)) {
+ /* Stop() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ /* Step 1: Free the data */
+ i2c_clrsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: Hold the clock */
+ I2C_HI_CLOCK(adap, ret);
+ i2c_udelay(adap);
+
+ /* Step 3: (P) condition */
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+ if (!i2c_getsda(adap)) {
+ /* Other master keeps the data low.
+ * The bus is busy.
+ */
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+i2c_write_byte(struct i2c_adapter *adap, unsigned char c)
+{
+ int needARB = 0;
+ int ret = 0, i;
+
+ if (i2c_getscl(adap)) {
+ /* Write() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ for (i = 7; i >= 0; i--) {
+ /* Step 1: set data. */
+ if (c & (1 << i)) {
+ needARB = 1;
+ i2c_setsda(adap);
+ } else {
+ needARB = 0;
+ i2c_clrsda(adap);
+ }
+ i2c_udelay(adap);
+
+ /* Step 2: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ if (needARB && !i2c_getsda(adap)) {
+ /* Do arbitration: lose the bus. */
+ return -EBUSY;
+ }
+ /* Double delay. */
+ i2c_udelay(adap);
+ i2c_udelay(adap);
+ if (needARB && !i2c_getsda(adap)) {
+ /* Do arbitration: someone performs (S) condition. */
+ return -EBUSY;
+ }
+
+ /* Step 3: free the clock. */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+ }
+
+ return 0;
+}
+
+static int
+i2c_read_byte(struct i2c_adapter *adap, unsigned char *cp)
+{
+ int ret, r, i;
+
+ if (i2c_getscl(adap)) {
+ /* Read() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+
+ *cp = 0;
+ for (i = 7; i >= 0; i--) {
+ /* Step 1: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ r = i2c_getsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: read the data. */
+ if (r != i2c_getsda(adap)) {
+ /* Do arbitration: someone performs (S/Sr/P) condition. */
+ return -EBUSY;
+ }
+ if (r)
+ *cp |= (1 << i);
+ i2c_udelay(adap);
+
+ /* Step 3: free the clock */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+ }
+
+ return 0;
+}
+
+static int
+i2c_ack(struct i2c_adapter *adap)
+{
+ int ret;
+
+ if (i2c_getscl(adap)) {
+ /* Ack() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ /* Step 1: free the data. */
+ i2c_clrsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ i2c_udelay(adap);
+
+ /* Step 3: free the clock */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+
+ return 0;
+}
+
+static int
+i2c_no_ack(struct i2c_adapter *adap)
+{
+ int ret;
+
+ if (i2c_getscl(adap)) {
+ /* Nak() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ /* Step 1: hold the data. */
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ if (!i2c_getsda(adap)) {
+ /* Other master keeps the data low. */
+ return -EBUSY;
+ }
+ i2c_udelay(adap);
+ if (!i2c_getsda(adap)) {
+ /* Do arbitration: someone performs (S/Sr) condition. */
+ return -EBUSY;
+ }
+
+ /* Step 3: free the clock */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+
+ return 0;
+}
+
+static int
+i2c_wait_ack(struct i2c_adapter *adap)
+{
+ int ret;
+
+ if (i2c_getscl(adap)) {
+ /* Wack() must be called after start() or any
+ * transfer routines, which all free the clock
+ * before returning.
+ */
+ return -ENOTSUP;
+ }
+
+ /* Step 1: hold the data. */
+ i2c_setsda(adap);
+ i2c_udelay(adap);
+
+ /* Step 2: hold the clock. */
+ I2C_HI_CLOCK(adap, ret);
+ i2c_udelay(adap);
+
+ /* Step 3: read the data. */
+ ret = i2c_getsda(adap) ? 0 : 1;
+
+ /* Step 4: free the clock */
+ i2c_clrscl(adap);
+ i2c_udelay(adap);
+
+ return ret;
+}
+
+static int
+i2c_write_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ int i, ret;
+
+ for (i = 0; i < msg->len; i++) {
+ ret = i2c_write_byte(adap, msg->buf[i]);
+ if (ret)
+ return ret;
+
+ ret = i2c_wait_ack(adap);
+ if (ret == 1)
+ continue;
+ else if (ret == 0)
+ return -ENXIO;
+ else
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i2c_read_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ unsigned char c;
+ int i, ret;
+
+ for (i = 0; i < msg->len; i++) {
+ ret = i2c_read_byte(adap, &c);
+ if (ret)
+ return ret;
+
+ msg->buf[i] = c;
+
+ if (i < msg->len - 1)
+ ret = i2c_ack(adap);
+ else
+ ret = i2c_no_ack(adap);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i2c_address(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ unsigned char addr;
+ int ret;
+
+ addr = msg->addr << 1;
+ if (msg->flags & I2C_M_RD)
+ addr |= 1;
+
+ ret = i2c_write_byte(adap, addr);
+ if (ret)
+ return ret;
+
+ ret = i2c_wait_ack(adap);
+ if (ret == 1)
+ return 0;
+ else if (ret == 0)
+ return -ENXIO;
+
+ return ret;
+}
+
+static int
+i2c_do_transfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct i2c_msg *msg;
+ int i, ret = 0;
+
+ for (i = 0; i < num; i++) {
+ msg = &msgs[i];
+
+ if (!(i && (msg->flags & I2C_M_NOSTART))) {
+ ret = i2c_start(adap);
+ if (ret)
+ return ret;
+
+ ret = i2c_address(adap, msg);
+ if (ret)
+ return ret;
+ }
+
+ if (msg->flags & I2C_M_RD)
+ ret = i2c_read_msg(adap, msg);
+ else
+ ret = i2c_write_msg(adap, msg);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_stop(adap);
+ if (ret)
+ return ret;
+
+ return num;
+}
+
+struct i2c_algorithm i2c_bit_algo = {
+ .master_xfer = i2c_do_transfer,
+ .functionality = NULL,
+};
+
+int
+i2c_bit_add_bus(struct i2c_adapter *adap)
+{
+ if (!adap->setscl || !adap->getscl || !adap->setsda || !adap->getsda)
+ return -EINVAL;
+
+ adap->algo = (struct i2c_algorithm *)&i2c_bit_algo;
+ adap->retries = 3;
+
+ return 0;
+}
+
+int
+i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ clock_t start;
+ int i, ret = 0;
+
+ mutex_enter(&adap->bus_lock);
+ start = ddi_get_lbolt();
+ for (i = 0; i <= adap->retries; i++) {
+ ret = adap->algo->master_xfer(adap, msgs, num);
+ switch (ret) {
+ case 0:
+		case -ETIMEDOUT:
+ goto do_exit;
+ default:
+ break;
+ }
+
+		/*
+		 * FIXME: can ddi_get_lbolt() return a negative (wrapped)
+		 * value?  If so, this timeout check needs to be revisited.
+		 */
+ if ((ddi_get_lbolt() - start) > adap->timeout)
+ break;
+ }
+
+do_exit:
+ mutex_exit(&adap->bus_lock);
+ return ret;
+}
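
The adapter is driven entirely through the four GPIO-style callbacks, and
transfers are described as arrays of struct i2c_msg.  A minimal sketch of how
a caller might read an EDID-style block over DDC follows; the
example_ddc_read() name, the 0x50 slave address and the assumption that the
caller has already filled in the callbacks, data, udelay, timeout and
bus_lock fields are illustrative only, not taken from this driver.

static int
example_ddc_read(struct i2c_adapter *adap, unsigned char *buf, int count)
{
	unsigned char offset = 0;
	struct i2c_msg msgs[2];
	int ret;

	ret = i2c_bit_add_bus(adap);	/* installs i2c_bit_algo, retries = 3 */
	if (ret)
		return (ret);

	msgs[0].addr = 0x50;		/* conventional DDC/EDID address */
	msgs[0].flags = 0;		/* address + write */
	msgs[0].len = 1;
	msgs[0].buf = &offset;		/* byte offset to start reading at */

	msgs[1].addr = 0x50;
	msgs[1].flags = I2C_M_RD;	/* repeated start, then read back */
	msgs[1].len = count;
	msgs[1].buf = buf;

	/* i2c_transfer() serializes on bus_lock and retries the transfer. */
	ret = i2c_transfer(adap, msgs, 2);
	return (ret < 0 ? ret : 0);
}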
diff --git a/usr/src/uts/common/io/drm/drm_sun_idr.c b/usr/src/uts/common/io/drm/drm_sun_idr.c
new file mode 100644
index 0000000..4b59878
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_idr.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <sys/kmem.h>
+#include "drmP.h"
+#include "drm_linux_list.h"
+#include "drm_sun_idr.h"
+
+static inline int
+fr_isfull(struct idr_free_id_range *range)
+{
+ return range->min_unused_id >= range->end;
+}
+
+static struct idr_free_id_range *
+fr_new(int start)
+{
+ struct idr_free_id_range *this;
+
+ this = kmem_zalloc(sizeof(struct idr_free_id_range), KM_SLEEP);
+ this->start = start;
+ this->end = 0x7fffffff;
+ this->min_unused_id = start;
+
+ return this;
+}
+
+static struct idr_free_id_range *
+fr_destroy(struct idr_free_id_range *range)
+{
+ struct idr_free_id_range *ret;
+ struct idr_free_id *id;
+
+ while (range->free_ids) {
+ id = range->free_ids;
+ range->free_ids = range->free_ids->next;
+ kmem_free(id, sizeof(struct idr_free_id));
+ }
+
+ ret = range->next;
+ kmem_free(range, sizeof(struct idr_free_id_range));
+
+ return ret;
+}
+
+static struct idr_free_id_range *
+fr_get(struct idr_free_id_range *list, int start)
+{
+ struct idr_free_id_range *entry = list;
+
+ while (entry && (entry->start != start))
+ entry = entry->next;
+ return entry;
+}
+
+static struct idr_free_id_range *
+fr_insert(struct idr_free_id_range *list, int start)
+{
+ struct idr_free_id_range *prev = list;
+ struct idr_free_id_range *n;
+ struct idr_free_id *id, **pid;
+
+ while (prev->next && prev->next->start < start)
+ prev = prev->next;
+
+ n = fr_new(start);
+
+ /* link the new range */
+ n->next = prev->next;
+ prev->next = n;
+
+ /* change the end of the ranges */
+ prev->end = start;
+ if (n->next)
+ n->end = n->next->start;
+
+ if (fr_isfull(prev)) {
+ /* change the min_unused_id of the ranges */
+ n->min_unused_id = prev->min_unused_id;
+ prev->min_unused_id = start;
+
+ /* link free id to the new range */
+ pid = &prev->free_ids;
+ while (*pid) {
+ if ((*pid)->id < start) {
+ pid = &((*pid)->next);
+ continue;
+ }
+ id = *pid;
+
+ /* remove from the prev range */
+ (*pid) = id->next;
+
+ /* link to the new range */
+ id->next = n->free_ids;
+ n->free_ids = id;
+ }
+ }
+
+ return n;
+}
+
+static int
+idr_compare(const void *a, const void *b)
+{
+ const struct idr_used_id *pa = a;
+ const struct idr_used_id *pb = b;
+
+ if (pa->id < pb->id)
+ return (-1);
+ else if (pa->id > pb->id)
+ return (1);
+ else
+ return (0);
+}
+
+static int
+idr_get_free_id_in_range(struct idr_free_id_range* range)
+{
+ struct idr_free_id *idp;
+ int id;
+
+ if (range->free_ids) {
+ idp = range->free_ids;
+ range->free_ids = idp->next;
+ id = idp->id;
+ kmem_free(idp, sizeof(struct idr_free_id));
+ return (id);
+ }
+
+ if (!fr_isfull(range)) {
+ id = range->min_unused_id;
+ range->min_unused_id++;
+ return (id);
+ }
+
+ return (-1);
+}
+
+void
+idr_init(struct idr *idrp)
+{
+ avl_create(&idrp->used_ids, idr_compare, sizeof(struct idr_used_id),
+ offsetof(struct idr_used_id, link));
+
+ idrp->free_id_ranges = fr_new(0);
+ mutex_init(&idrp->lock, NULL, MUTEX_DRIVER, NULL);
+}
+
+int
+idr_get_new_above(struct idr *idrp, void *obj, int start, int *newid)
+{
+ struct idr_used_id *used;
+ struct idr_free_id_range *range;
+ int id;
+
+ if (start < 0)
+ return (-EINVAL);
+ mutex_enter(&idrp->lock);
+ range = fr_get(idrp->free_id_ranges, start);
+ if (!range)
+ range = fr_insert(idrp->free_id_ranges, start);
+
+ while (range) {
+ id = idr_get_free_id_in_range(range);
+ if (id >= 0)
+ goto got_id;
+ range = range->next;
+ }
+ mutex_exit(&idrp->lock);
+ return (-1);
+
+got_id:
+ used = kmem_alloc(sizeof(struct idr_used_id), KM_NOSLEEP);
+ if (!used) {
+ mutex_exit(&idrp->lock);
+ return (-ENOMEM);
+ }
+
+ used->id = id;
+ used->obj = obj;
+ avl_add(&idrp->used_ids, used);
+
+ *newid = id;
+ mutex_exit(&idrp->lock);
+ return (0);
+}
+
+static struct idr_used_id *
+idr_find_used_id(struct idr *idrp, uint32_t id)
+{
+ struct idr_used_id match;
+ struct idr_used_id *ret;
+
+ match.id = id;
+
+ ret = avl_find(&idrp->used_ids, &match, NULL);
+ if (ret) {
+ return (ret);
+ }
+
+ return (NULL);
+}
+
+void *
+idr_find(struct idr *idrp, uint32_t id)
+{
+ struct idr_used_id *ret;
+
+ mutex_enter(&idrp->lock);
+ ret = idr_find_used_id(idrp, id);
+ if (ret) {
+ mutex_exit(&idrp->lock);
+ return (ret->obj);
+ }
+
+ mutex_exit(&idrp->lock);
+ return (NULL);
+}
+
+int
+idr_remove(struct idr *idrp, uint32_t id)
+{
+ struct idr_free_id_range *range;
+ struct idr_used_id *ide;
+ struct idr_free_id *fid;
+
+ mutex_enter(&idrp->lock);
+ ide = idr_find_used_id(idrp, id);
+ if (!ide) {
+ mutex_exit(&idrp->lock);
+ return (-EINVAL);
+ }
+
+ fid = kmem_alloc(sizeof(struct idr_free_id), KM_NOSLEEP);
+ if (!fid) {
+ mutex_exit(&idrp->lock);
+ return (-ENOMEM);
+ }
+ fid->id = id;
+
+
+ range = idrp->free_id_ranges;
+ while (range->end <= id)
+ range = range->next;
+ fid->next = range->free_ids;
+ range->free_ids = fid;
+ avl_remove(&idrp->used_ids, ide);
+ kmem_free(ide, sizeof (struct idr_used_id));
+ mutex_exit(&idrp->lock);
+
+ return (0);
+}
+
+void
+idr_remove_all(struct idr *idrp)
+{
+ idr_destroy(idrp);
+ idr_init(idrp);
+}
+
+void *
+idr_replace(struct idr *idrp, void *obj, uint32_t id)
+{
+ struct idr_used_id *ide;
+ void *ret;
+ mutex_enter(&idrp->lock);
+ ide = idr_find_used_id(idrp, id);
+ if (!ide) {
+ mutex_exit(&idrp->lock);
+ return (void*)(-EINVAL);
+ }
+
+ ret = ide->obj;
+ ide->obj = obj;
+ mutex_exit(&idrp->lock);
+ return ret;
+}
+
+int
+idr_for_each(struct idr *idrp, int (*fn)(int id, void *p, void *data), void *data)
+{
+ struct idr_used_id *ide;
+ int ret = 0;
+
+ ide = avl_first(&idrp->used_ids);
+ while (ide) {
+ ret = fn(ide->id, ide->obj, data);
+ if (ret)
+ break;
+
+		/* Careful: fn may have removed the current idr node. */
+ ide = AVL_NEXT(&idrp->used_ids, ide);
+ }
+
+ return ret;
+}
+
+int
+/* LINTED */
+idr_pre_get(struct idr *idrp, int flag)
+{
+ return (-1);
+}
+
+void
+idr_destroy(struct idr *idrp)
+{
+ struct idr_free_id_range *range;
+ struct idr_used_id *ide;
+ void *cookie = NULL;
+
+	while ((ide = avl_destroy_nodes(&idrp->used_ids, &cookie)) != NULL)
+ kmem_free(ide, sizeof (struct idr_used_id));
+ avl_destroy(&idrp->used_ids);
+
+ range = idrp->free_id_ranges;
+ while (range)
+ range = fr_destroy(range);
+ idrp->free_id_ranges = NULL;
+
+ mutex_destroy(&idrp->lock);
+}
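
The allocator above partitions the 31-bit id space into [start, end) ranges:
each range remembers the smallest id it has never handed out and keeps a
free list of ids returned to it, and idr_get_new_above() splits the initial
[0, 0x7fffffff) range the first time a new start value is requested.  A
minimal usage sketch (example_idr_usage() is hypothetical):

static int
example_idr_usage(void *obj)
{
	struct idr idr;
	int id, ret;

	idr_init(&idr);

	/*
	 * Asking for an id at or above 100 splits [0, 0x7fffffff) at 100
	 * and returns 100; the next call on the same range returns 101.
	 */
	ret = idr_get_new_above(&idr, obj, 100, &id);
	if (ret) {
		idr_destroy(&idr);
		return (ret);
	}

	ASSERT(idr_find(&idr, id) == obj);

	(void) idr_remove(&idr, id);	/* id returns to the range's free list */
	idr_destroy(&idr);
	return (0);
}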
+
+
+uint32_t fr_id = 0;
+uint32_t fr_time = 0;
+
+int
+/* LINTED */
+idr_list_pre_get(struct idr_list *head, int flag)
+{
+ return (-1);
+}
+
+void
+idr_list_init(struct idr_list *head)
+{
+ struct idr_list *entry;
+	/* Hash buckets to speed up handle lookups */
+ entry = kmem_zalloc(DRM_GEM_OBJIDR_HASHNODE
+ * sizeof (struct idr_list), KM_SLEEP);
+ head->next = entry;
+ for (int i = 0; i < DRM_GEM_OBJIDR_HASHNODE; i++) {
+ INIT_LIST_HEAD(&entry[i]);
+ }
+}
+
+int
+idr_list_get_new_above(struct idr_list *head,
+ void *obj,
+ int *handlep)
+{
+ struct idr_list *entry, *node, *temp;
+ int key, id;
+ void *obj_temp;
+
+ ASSERT(fr_id <= 0x7fffffff);
+ id = ++fr_id;
+ if (id == 0x7fffffff) {
+ fr_time++;
+ id = fr_id = 1;
+ }
+ if (fr_time) {
+ /* find available id */
+ do {
+ obj_temp = idr_list_find(head, id);
+ } while ((obj_temp != NULL) && (++id < 0x7fffffff));
+ if (id < 0x7fffffff) {
+ fr_id = id;
+ } else {
+ fr_id = 0;
+ return (-EAGAIN);
+ }
+ }
+ entry = kmem_zalloc(sizeof (*entry), KM_NOSLEEP);
+ if (entry == NULL)
+ return (-1);
+ ASSERT(id <= 0x7fffffff);
+ key = id % DRM_GEM_OBJIDR_HASHNODE;
+ entry->obj = obj;
+ entry->handle = id;
+
+ /* list add */
+ node = &head->next[key];
+ temp = node->next;
+ entry->prev = node;
+ entry->next = node->next;
+ temp->prev = entry;
+ node->next = entry;
+
+ *handlep = id;
+ return (0);
+}
+
+void *
+idr_list_find(struct idr_list *head,
+ uint32_t name)
+{
+ struct idr_list *entry;
+ int key;
+ key = name % DRM_GEM_OBJIDR_HASHNODE;
+
+ list_for_each(entry, &head->next[key]) {
+ if (entry->handle == name)
+ return (entry->obj);
+ }
+ return (NULL);
+}
+
+#define list_del_idr_list_node(ptr) \
+do { \
+ struct idr_list *n_node = (ptr)->next; \
+ struct idr_list *p_node = (ptr)->prev; \
+ n_node->prev = (ptr)->prev; \
+ p_node->next = (ptr)->next; \
+ (ptr)->prev = NULL; \
+ (ptr)->next = NULL; \
+} while (*"\0")
+
+
+int
+idr_list_remove(struct idr_list *head,
+ uint32_t name)
+{
+ struct idr_list *entry, *temp;
+ int key;
+ key = name % DRM_GEM_OBJIDR_HASHNODE;
+
+ list_for_each_safe(entry, temp, &head->next[key]) {
+ if (entry->handle == name) {
+ list_del_idr_list_node(entry);
+ kmem_free(entry, sizeof (*entry));
+ return (0);
+ }
+ }
+ DRM_ERROR("Failed to remove the object %d", name);
+ return (-1);
+}
+
+void
+idr_list_free(struct idr_list *head)
+{
+ struct idr_list *entry, *temp;
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
+ list_for_each_safe(entry, temp, &head->next[key]) {
+ list_del_idr_list_node(entry);
+ kmem_free(entry, sizeof (*entry));
+ }
+ }
+ kmem_free(head->next,
+ DRM_GEM_OBJIDR_HASHNODE * sizeof (struct idr_list));
+ head->next = NULL;
+}
+
+int
+idr_list_empty(struct idr_list *head)
+{
+ int empty;
+ for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
+ empty = list_empty(&(head)->next[key]);
+ if (!empty)
+ return (empty);
+ }
+ return (1);
+}
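
The idr_list_* helpers above are the hash-bucketed path apparently kept for
GEM object handles (hence DRM_GEM_OBJIDR_HASHNODE); handles come from the
global fr_id counter, so they stay unique across tables until the counter
wraps at 0x7fffffff.  A short usage sketch (example_gem_handle() is
hypothetical):

static int
example_gem_handle(void *gem_obj)
{
	struct idr_list handles;
	int handle;

	idr_list_init(&handles);	/* allocates the hash buckets */

	if (idr_list_get_new_above(&handles, gem_obj, &handle)) {
		idr_list_free(&handles);
		return (-1);
	}

	ASSERT(idr_list_find(&handles, handle) == gem_obj);

	(void) idr_list_remove(&handles, handle);
	idr_list_free(&handles);	/* releases the hash buckets */
	return (0);
}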
diff --git a/usr/src/uts/common/io/drm/drm_sun_pci.c b/usr/src/uts/common/io/drm/drm_sun_pci.c
new file mode 100644
index 0000000..6bdc517
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_pci.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/visual_io.h>
+#include <sys/fbio.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/open.h>
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/kd.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/sunldi.h>
+#include <sys/mkdev.h>
+#include "drmP.h"
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpmaster_io.h>
+
+#define PCI_BUS(x) (((x) & 0xff0000) >> 16)
+#define PCI_SLOT(x) (((x)>>11) & 0x1f)
+#define PCI_FUNC(x) (((x) & 0x700) >> 8)
+
+struct pci_dev *
+pci_dev_create(struct drm_device *dev)
+{
+ dev_info_t *dip = dev->devinfo;
+ pci_regspec_t *regspec;
+ struct pci_dev *pdev;
+ int *regs, ret, len, i;
+ uint_t nregs = 0;
+
+ pdev = kmem_zalloc(sizeof(struct pci_dev), KM_NOSLEEP);
+ if (!pdev)
+ return (NULL);
+
+ /* access handle */
+ ret = pci_config_setup(dip, &pdev->pci_cfg_acc_handle);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("pci_config_setup() failed");
+ goto err_setup;
+ }
+
+ /* XXX Fix domain number (alpha hoses) */
+ pdev->domain = 0;
+
+ /* bus, slot, func */
+ ret = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "reg", (int **)&regs, &nregs);
+ if (ret != DDI_PROP_SUCCESS) {
+ DRM_ERROR("ddi_prop_lookup_int_array() failed");
+ goto err_info;
+ }
+ pdev->bus = (int)PCI_BUS(regs[0]);
+ pdev->slot = (int)PCI_SLOT(regs[0]);
+ pdev->func = (int)PCI_FUNC(regs[0]);
+ ddi_prop_free(regs);
+
+ /* irq */
+ ret = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "interrupts", -1);
+ if (ret == -1) {
+ DRM_ERROR("ddi_prop_get_int() failed");
+ goto err_irq;
+ }
+ if (ret > 0)
+ pdev->irq = pci_config_get8(pdev->pci_cfg_acc_handle, PCI_CONF_ILINE);
+
+ if (ddi_intr_hilevel(dip, 0) != 0) {
+ DRM_ERROR("high-level interrupts are not supported");
+ goto err_irq;
+ }
+
+ if (ddi_get_iblock_cookie(dip, (uint_t)0,
+ &pdev->intr_block) != DDI_SUCCESS) {
+ DRM_ERROR("cannot get iblock cookie");
+ goto err_irq;
+ }
+
+ /* regions */
+ ret = ddi_getlongprop(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "assigned-addresses", (caddr_t)&regspec, &len);
+ if (ret != DDI_PROP_SUCCESS) {
+ DRM_ERROR("ddi_getlongprop() failed");
+ goto err_regions;
+ }
+ for (i = 0; i < PCI_CONFIG_REGION_NUMS; i++) {
+ pdev->regions[i].start =
+ (unsigned long)regspec[i].pci_phys_low;
+ pdev->regions[i].size =
+ (unsigned long)regspec[i].pci_size_low;
+ }
+ kmem_free(regspec, (size_t)len);
+
+ /* vendor, device */
+ pdev->vendor = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev->devinfo, DDI_PROP_DONTPASS, "vendor-id", 0);
+ pdev->device = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dev->devinfo, DDI_PROP_DONTPASS, "device-id", 0);
+
+ pdev->dev = dev;
+ return pdev;
+
+err_regions:
+err_irq:
+err_info:
+ pci_config_teardown(&pdev->pci_cfg_acc_handle);
+err_setup:
+ kmem_free(pdev, sizeof(struct pci_dev));
+ return (NULL);
+}
+
+void pci_dev_destroy(struct pci_dev *pdev)
+{
+ pci_config_teardown(&pdev->pci_cfg_acc_handle);
+ kmem_free(pdev, sizeof(struct pci_dev));
+}
+
+void pci_read_config_byte(struct pci_dev *pdev, int where, u8 *val)
+{
+ *val = pci_config_get8(pdev->pci_cfg_acc_handle, where);
+}
+
+void pci_read_config_word(struct pci_dev *pdev, int where, u16 *val)
+{
+ *val = pci_config_get16(pdev->pci_cfg_acc_handle, where);
+}
+
+void pci_read_config_dword(struct pci_dev *pdev, int where, u32 *val)
+{
+ *val = pci_config_get32(pdev->pci_cfg_acc_handle, where);
+}
+
+void pci_write_config_byte(struct pci_dev *pdev, int where, u8 val)
+{
+ pci_config_put8(pdev->pci_cfg_acc_handle, where, val);
+}
+
+void pci_write_config_word(struct pci_dev *pdev, int where, u16 val)
+{
+ pci_config_put16(pdev->pci_cfg_acc_handle, where, val);
+}
+
+void pci_write_config_dword(struct pci_dev *pdev, int where, u32 val)
+{
+ pci_config_put32(pdev->pci_cfg_acc_handle, where, val);
+}
+
+/* LINTED */
+u8* pci_map_rom(struct pci_dev *pdev, size_t *size)
+{
+ u32 base;
+
+ base = 0xC0000;
+ *size = 0x20000;
+
+ return (u8*)drm_sun_ioremap(base, *size, DRM_MEM_UNCACHED);
+}
+
+/* LINTED */
+void pci_unmap_rom(struct pci_dev *pdev, u8 *base)
+{
+ iounmap(base);
+}
+
+int pci_find_capability(struct pci_dev *pdev, int capid)
+{
+ uint8_t cap = 0;
+ uint16_t caps_ptr;
+
+	/* Does the device have a capabilities list? */
+ if ((pci_config_get16(pdev->pci_cfg_acc_handle,
+ PCI_CONF_STAT) & PCI_CONF_CAP_MASK) == 0)
+ return (0);
+
+ caps_ptr = pci_config_get8(
+ pdev->pci_cfg_acc_handle, PCI_CONF_CAP_PTR);
+ while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
+ cap = pci_config_get32(
+ pdev->pci_cfg_acc_handle, caps_ptr);
+ if ((cap & PCI_CONF_CAPID_MASK) == capid)
+ return (cap);
+ caps_ptr = pci_config_get8(
+ pdev->pci_cfg_acc_handle,
+ caps_ptr + PCI_CAP_NEXT_PTR);
+ }
+
+ return (0);
+}
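
These wrappers give the rest of the driver a Linux-flavoured view of PCI
config space on top of the DDI access handle set up in pci_dev_create().  A
usage sketch (example_pci_probe() is hypothetical; 0x02 is the PCI-SIG
capability ID assigned to AGP):

static int
example_pci_probe(struct drm_device *dev)
{
	struct pci_dev *pdev;
	u16 command;

	pdev = pci_dev_create(dev);	/* maps config space, fills in BARs */
	if (pdev == NULL)
		return (-ENOMEM);

	pci_read_config_word(pdev, PCI_CONF_COMM, &command);
	cmn_err(CE_NOTE, "command=0x%x vendor=0x%x device=0x%x",
	    command, pdev->vendor, pdev->device);

	if (pci_find_capability(pdev, 0x02) == 0)
		cmn_err(CE_NOTE, "no AGP capability");

	pci_dev_destroy(pdev);
	return (0);
}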
diff --git a/usr/src/uts/common/io/drm/drm_sun_timer.c b/usr/src/uts/common/io/drm/drm_sun_timer.c
new file mode 100644
index 0000000..b8039c7
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_timer.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include "drm_sun_timer.h"
+
+void
+init_timer(struct timer_list *timer)
+{
+ mutex_init(&timer->lock, NULL, MUTEX_DRIVER, NULL);
+}
+
+void
+destroy_timer(struct timer_list *timer)
+{
+ mutex_destroy(&timer->lock);
+}
+
+void
+setup_timer(struct timer_list *timer, void (*func)(void *), void *arg)
+{
+ timer->func = func;
+ timer->arg = arg;
+ timer->expired_time = 0;
+}
+
+void
+mod_timer(struct timer_list *timer, clock_t expires)
+{
+ mutex_enter(&timer->lock);
+ /*
+	 * If the timer is already being updated, let the previous caller
+	 * do the work.  Note that we must drop timer->lock before calling
+	 * untimeout(), as the callback might call mod_timer() and deadlock.
+	 * So the guard is centered around the overloading of the
+	 * timer->expires element.
+ */
+ if (timer->expires == -1) {
+ mutex_exit(&timer->lock);
+ return;
+ }
+ timer->expires = -1;
+ mutex_exit(&timer->lock);
+
+ (void) untimeout(timer->timer_id);
+ timer->expired_time = jiffies + expires;
+ timer->timer_id = timeout(timer->func, timer->arg, expires);
+
+ mutex_enter(&timer->lock);
+ timer->expires = 0;
+ mutex_exit(&timer->lock);
+}
+
+void
+del_timer(struct timer_list *timer)
+{
+ (void) untimeout(timer->timer_id);
+}
+
+void
+test_set_timer(struct timer_list *timer, clock_t expires)
+{
+ if (time_after(jiffies, timer->expired_time)) {
+ timer->expired_time = jiffies + expires;
+ timer->timer_id = timeout(timer->func, timer->arg, expires);
+ }
+}
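
The timer shim maps Linux-style mod_timer()/del_timer() onto the DDI
timeout()/untimeout() facility, with timer->expires doubling as the
re-entrancy guard described in mod_timer() above.  A usage sketch (the
example_* names are hypothetical; delays are in clock ticks, hence
drv_usectohz()):

static void
example_timeout_cb(void *arg)
{
	struct timer_list *t = arg;

	/* Re-arm for another second; mod_timer() tolerates being called here. */
	mod_timer(t, drv_usectohz(1000000));
}

static void
example_timer_usage(struct timer_list *t)
{
	init_timer(t);
	setup_timer(t, example_timeout_cb, t);
	mod_timer(t, drv_usectohz(1000000));	/* fire in roughly one second */

	/* ... later, e.g. on detach ... */
	del_timer(t);
	destroy_timer(t);
}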
+
diff --git a/usr/src/uts/common/io/drm/drm_sun_workqueue.c b/usr/src/uts/common/io/drm/drm_sun_workqueue.c
new file mode 100644
index 0000000..8a5ba24
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sun_workqueue.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <sys/sunddi.h>
+#include <sys/types.h>
+
+#include "drm_sun_workqueue.h"
+
+int
+__queue_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+ int ret;
+
+ ASSERT(wq->taskq != NULL);
+ ASSERT(work->func != NULL);
+ /*
+	 * ddi_taskq_dispatch() can fail if there aren't enough memory
+	 * resources.  In theory, since we are requesting a DDI_SLEEP
+	 * allocation, it should be very rare for it to fail.
+ */
+ if ((ret = ddi_taskq_dispatch(wq->taskq, work->func, work, DDI_SLEEP))
+ == DDI_FAILURE)
+ cmn_err(CE_WARN, "queue_work: ddi_taskq_dispatch failure");
+ return (ret);
+}
+
+void
+init_work(struct work_struct *work, void (*func)(void *))
+{
+ work->func = func;
+}
+
+struct workqueue_struct *
+create_workqueue(dev_info_t *dip, char *name)
+{
+ struct workqueue_struct *wq;
+
+ wq = kmem_zalloc(sizeof (struct workqueue_struct), KM_SLEEP);
+ wq->taskq = ddi_taskq_create(dip, name, 1, TASKQ_DEFAULTPRI, 0);
+ if (wq->taskq == NULL)
+ goto fail;
+ wq->name = name;
+
+ return wq;
+
+fail:
+ kmem_free(wq, sizeof (struct workqueue_struct));
+ return (NULL);
+}
+
+void
+destroy_workqueue(struct workqueue_struct *wq)
+{
+ if (wq) {
+ ddi_taskq_destroy(wq->taskq);
+ kmem_free(wq, sizeof (struct workqueue_struct));
+ }
+}
+
+void
+cancel_delayed_work(struct workqueue_struct *wq)
+{
+ ddi_taskq_wait(wq->taskq);
+}
+
+void
+flush_workqueue(struct workqueue_struct *wq)
+{
+ ddi_taskq_wait(wq->taskq);
+}
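
Each workqueue wraps a single-threaded DDI taskq, so queued items run one at
a time in submission order and flush_workqueue() simply waits for the taskq
to drain.  A usage sketch (the example_* names and the queue name are
hypothetical; __queue_work() is called directly rather than through any
wrapper the header may provide):

static void
example_work_handler(void *arg)
{
	/* __queue_work() passes the work_struct itself as the argument. */
	cmn_err(CE_CONT, "work item %p ran\n", arg);
}

static int
example_wq_usage(struct drm_device *dev)
{
	struct workqueue_struct *wq;
	struct work_struct work;

	wq = create_workqueue(dev->devinfo, "drm_example_wq");
	if (wq == NULL)
		return (-ENOMEM);

	init_work(&work, example_work_handler);
	(void) __queue_work(wq, &work);

	flush_workqueue(wq);	/* "work" is on the stack, so drain before return */
	destroy_workqueue(wq);
	return (0);
}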
diff --git a/usr/src/uts/common/io/drm/drm_sunmod.c b/usr/src/uts/common/io/drm/drm_sunmod.c
index 2f69229..24971ba 100644
--- a/usr/src/uts/common/io/drm/drm_sunmod.c
+++ b/usr/src/uts/common/io/drm/drm_sunmod.c
@@ -1,27 +1,28 @@
/*
- * CDDL HEADER START
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
*
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
*/
/*
@@ -34,288 +35,427 @@
*/
#include "drm_sunmod.h"
+#include "drm_sun_idr.h"
#include <sys/modctl.h>
#include <sys/kmem.h>
#include <vm/seg_kmem.h>
-static struct modlmisc modlmisc = {
- &mod_miscops, "DRM common interfaces"
+int drm_debug_flag = 0;
+int mdb_track_enable = B_FALSE;
+
+/* Identifier of this driver */
+static struct vis_identifier text_ident = { "SUNWdrm" };
+
+static ddi_device_acc_attr_t dev_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC,
+ DDI_FLAGERR_ACC
};
-static struct modlinkage modlinkage = {
- MODREV_1, (void *)&modlmisc, NULL
+static ddi_device_acc_attr_t gem_dev_attr = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_MERGING_OK_ACC,
+ DDI_FLAGERR_ACC
};
-static drm_inst_list_t *drm_inst_head;
-static kmutex_t drm_inst_list_lock;
+extern int __init drm_core_init(void);
+extern void __exit drm_core_exit(void);
+extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);
-static int drm_sun_open(dev_t *, int, int, cred_t *);
-static int drm_sun_close(dev_t, int, int, cred_t *);
-static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
-static int drm_sun_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
- size_t *, uint_t);
+struct find_gem_object {
+ offset_t offset;
+ struct drm_gem_object *obj;
+};
-/*
- * devmap callbacks for AGP and PCI GART
- */
-static int drm_devmap_map(devmap_cookie_t, dev_t,
- uint_t, offset_t, size_t, void **);
-static int drm_devmap_dup(devmap_cookie_t, void *,
- devmap_cookie_t, void **);
-static void drm_devmap_unmap(devmap_cookie_t, void *,
- offset_t, size_t, devmap_cookie_t, void **, devmap_cookie_t, void **);
+static int
+drm_devmap_map(devmap_cookie_t dhc, dev_t dev_id, uint_t flags,
+ offset_t offset, size_t len, void **new_priv)
+{
+ _NOTE(ARGUNUSED(offset, len))
-static drm_inst_list_t *drm_supp_alloc_drv_entry(dev_info_t *);
-static drm_inst_state_t *drm_sup_devt_to_state(dev_t);
-static void drm_supp_free_drv_entry(dev_info_t *);
+ devmap_handle_t *dhp;
+ struct ddi_umem_cookie *cp;
+ struct drm_minor *minor;
+ struct drm_device *dev;
+ int minor_id;
-static struct devmap_callback_ctl drm_devmap_callbacks = {
- DEVMAP_OPS_REV, /* devmap_rev */
- drm_devmap_map, /* devmap_map */
- NULL, /* devmap_access */
- drm_devmap_dup, /* devmap_dup */
- drm_devmap_unmap /* devmap_unmap */
-};
+ minor_id = DRM_DEV2MINOR(dev_id);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ dev = minor->dev;
-/*
- * Common device operations structure for all DRM drivers
- */
-struct cb_ops drm_cb_ops = {
- drm_sun_open, /* cb_open */
- drm_sun_close, /* cb_close */
- nodev, /* cb_strategy */
- nodev, /* cb_print */
- nodev, /* cb_dump */
- nodev, /* cb_read */
- nodev, /* cb_write */
- drm_sun_ioctl, /* cb_ioctl */
- drm_sun_devmap, /* cb_devmap */
- nodev, /* cb_mmap */
- NULL, /* cb_segmap */
- nochpoll, /* cb_chpoll */
- ddi_prop_op, /* cb_prop_op */
- 0, /* cb_stream */
- D_NEW | D_MTSAFE |D_DEVMAP /* cb_flag */
-};
+ /*
+ * This driver only supports MAP_SHARED,
+ * and doesn't support MAP_PRIVATE
+ */
+ if (flags & MAP_PRIVATE) {
+		DRM_ERROR("MAP_PRIVATE mappings are not supported");
+ return (EINVAL);
+ }
-int
-_init(void)
+ mutex_enter(&dev->struct_mutex);
+ dhp = (devmap_handle_t *)dhc;
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+ cp->cook_refcnt = 1;
+ mutex_exit(&dev->struct_mutex);
+
+ *new_priv = dev;
+ return (0);
+}
+
+static int
+drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
+ void **new_pvtp)
{
- int error;
+ _NOTE(ARGUNUSED(new_dhc))
- if ((error = mod_install(&modlinkage)) != 0) {
- return (error);
- }
+ struct drm_device *dev = (struct drm_device *)pvtp;
+ devmap_handle_t *dhp;
+ struct ddi_umem_cookie *cp;
+
+ mutex_enter(&dev->struct_mutex);
+ dhp = (devmap_handle_t *)dhc;
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+ cp->cook_refcnt++;
+ mutex_exit(&dev->struct_mutex);
- /* initialize the instance list lock */
- mutex_init(&drm_inst_list_lock, NULL, MUTEX_DRIVER, NULL);
+ *new_pvtp = dev;
return (0);
}
-int
-_fini(void)
+static void
+drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
+ devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
+ void **new_pvtp2)
{
- int err;
+ _NOTE(ARGUNUSED(off, len))
- if ((err = mod_remove(&modlinkage)) != 0)
- return (err);
+ struct drm_device *dev;
+ devmap_handle_t *dhp;
+ devmap_handle_t *ndhp;
+ struct ddi_umem_cookie *cp;
+ struct ddi_umem_cookie *ncp;
- mutex_destroy(&drm_inst_list_lock);
- return (0);
+ dhp = (devmap_handle_t *)dhc;
+ dev = (struct drm_device *)pvtp;
+
+ mutex_enter(&dev->struct_mutex);
+
+ cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
+ if (new_dhp1 != NULL) {
+ ndhp = (devmap_handle_t *)new_dhp1;
+ ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
+ ncp->cook_refcnt++;
+ *new_pvtp1 = dev;
+ ASSERT(ncp == cp);
+ }
+
+ if (new_dhp2 != NULL) {
+ ndhp = (devmap_handle_t *)new_dhp2;
+ ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
+ ncp->cook_refcnt++;
+ *new_pvtp2 = dev;
+ ASSERT(ncp == cp);
+ }
+
+ /* FIXME: dh_cookie should not be released here. */
+#if 0
+ cp->cook_refcnt--;
+ if (cp->cook_refcnt == 0) {
+ gfxp_umem_cookie_destroy(dhp->dh_cookie);
+ dhp->dh_cookie = NULL;
+ }
+#endif
+
+ mutex_exit(&dev->struct_mutex);
}
-int
-_info(struct modinfo *modinfop)
+static struct devmap_callback_ctl drm_devmap_callbacks = {
+ DEVMAP_OPS_REV, /* devmap_rev */
+ drm_devmap_map, /* devmap_map */
+ NULL, /* devmap_access */
+ drm_devmap_dup, /* devmap_dup */
+ drm_devmap_unmap /* devmap_unmap */
+};
+
+static struct drm_local_map *
+__find_local_map(struct drm_device *dev, offset_t offset)
{
- return (mod_info(&modlinkage, modinfop));
+ struct drm_map_list *entry;
+
+ entry = idr_find(&dev->map_idr, offset >> PAGE_SHIFT);
+ if (entry)
+ return (entry->map);
+
+ return (NULL);
}
-void *
-drm_supp_register(dev_info_t *dip, drm_device_t *dp)
+static int
+drm_gem_map(devmap_cookie_t dhp, dev_t dev, uint_t flags, offset_t off,
+ size_t len, void **pvtp)
{
- int error;
- char buf[80];
- int instance = ddi_get_instance(dip);
- ddi_acc_handle_t pci_cfg_handle;
- agp_master_softc_t *agpm;
- drm_inst_state_t *mstate;
- drm_inst_list_t *entry;
- gfxp_vgatext_softc_ptr_t gfxp;
- struct dev_ops *devop;
-
- ASSERT(dip != NULL);
-
- entry = drm_supp_alloc_drv_entry(dip);
- if (entry == NULL) {
- cmn_err(CE_WARN, "drm_supp_register: failed to get softstate");
- return (NULL);
+ _NOTE(ARGUNUSED(dhp, flags, len))
+
+ struct drm_device *drm_dev;
+ struct drm_minor *minor;
+ int minor_id = DRM_DEV2MINOR(dev);
+ drm_local_map_t *map = NULL;
+
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
+
+ drm_dev = minor->dev;
+
+ mutex_enter(&drm_dev->struct_mutex);
+ map = __find_local_map(drm_dev, off);
+ if (!map) {
+ mutex_exit(&drm_dev->struct_mutex);
+ *pvtp = NULL;
+ return (DDI_EINVAL);
}
- mstate = &entry->disl_state;
- /*
- * DRM drivers are required to use common cb_ops
- */
- devop = ddi_get_driver(dip);
- if (devop->devo_cb_ops != &drm_cb_ops) {
- devop->devo_cb_ops = &drm_cb_ops;
+ mutex_exit(&drm_dev->struct_mutex);
+
+ *pvtp = map->handle;
+
+ return (DDI_SUCCESS);
+}
+
+static int
+drm_gem_map_access(devmap_cookie_t dhp, void *pvt, offset_t offset, size_t len,
+ uint_t type, uint_t rw)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ struct gem_map_list *seg;
+
+ obj = (struct drm_gem_object *)pvt;
+ if (obj == NULL) {
+ goto next;
}
- /* Generic graphics initialization */
- gfxp = gfxp_vgatext_softc_alloc();
- error = gfxp_vgatext_attach(dip, DDI_ATTACH, gfxp);
- if (error != DDI_SUCCESS) {
- DRM_ERROR("drm_supp_regiter: failed to init gfx");
- goto exit1;
+ dev = obj->dev;
+ if (dev->driver->gem_fault != NULL)
+ dev->driver->gem_fault(obj);
+
+next:
+ if (devmap_load(dhp, offset, len, type, rw)) {
+ return (DDI_FAILURE);
+ }
+ if (obj != NULL) {
+ seg = drm_alloc(sizeof (struct gem_map_list), DRM_MEM_MAPS);
+ if (seg != NULL) {
+ mutex_lock(&dev->page_fault_lock);
+ seg->dhp = dhp;
+ seg->mapoffset = offset;
+ seg->maplen = len;
+ list_add_tail(&seg->head, &obj->seg_list, (caddr_t)seg);
+ mutex_unlock(&dev->page_fault_lock);
+ }
}
+ return (DDI_SUCCESS);
+}
+
+static void
+drm_gem_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
+ devmap_cookie_t new_dhp1, void **newpvtp1,
+ devmap_cookie_t new_dhp2, void **newpvtp2)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ struct gem_map_list *entry, *temp;
- /* create a minor node for common graphics ops */
- (void) sprintf(buf, "%s%d", GFX_NAME, instance);
- error = ddi_create_minor_node(dip, buf, S_IFCHR,
- INST2NODE0(instance), DDI_NT_DISPLAY, NULL);
- if (error != DDI_SUCCESS) {
- DRM_ERROR("drm_supp_regiter: "
- "failed to create minor node for gfx");
- goto exit2;
+ _NOTE(ARGUNUSED(dhp, pvtp, off, len, new_dhp1, newpvtp1))
+ _NOTE(ARGUNUSED(new_dhp2, newpvtp2))
+
+ obj = (struct drm_gem_object *)pvtp;
+ if (obj == NULL)
+ return;
+
+ dev = obj->dev;
+
+ mutex_lock(&dev->page_fault_lock);
+ if (list_empty(&obj->seg_list)) {
+ mutex_unlock(&dev->page_fault_lock);
+ return;
}
- /* setup mapping for later PCI config space access */
- error = pci_config_setup(dip, &pci_cfg_handle);
- if (error != DDI_SUCCESS) {
- DRM_ERROR("drm_supp_regiter: "
- "PCI configuration space setup failed");
- goto exit2;
+ list_for_each_entry_safe(entry, temp, struct gem_map_list,
+ &obj->seg_list, head) {
+ (void) devmap_unload(entry->dhp, entry->mapoffset,
+ entry->maplen);
+ list_del(&entry->head);
+ drm_free(entry, sizeof (struct gem_map_list), DRM_MEM_MAPS);
}
- /* AGP master attach */
- agpm = NULL;
- if (dp->driver->use_agp) {
- DRM_DEBUG("drm_supp_regiter: driver use AGP\n");
- error = agpmaster_attach(dip, &agpm,
- pci_cfg_handle, INST2NODE1(instance));
- if ((error != DDI_SUCCESS) && (dp->driver->require_agp)) {
- DRM_ERROR("drm_supp_regiter: "
- "AGP master support not available");
- goto exit3;
- }
+ mutex_unlock(&dev->page_fault_lock);
+}
+
+static struct devmap_callback_ctl drm_gem_map_ops = {
+ DEVMAP_OPS_REV, /* devmap_ops version number */
+ drm_gem_map, /* devmap_ops map routine */
+ drm_gem_map_access, /* devmap_ops access routine */
+ NULL, /* devmap_ops dup routine */
+ drm_gem_unmap, /* devmap_ops unmap routine */
+};
+
+static int
+__devmap_general(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t len, size_t *maplen)
+{
+ off_t regoff;
+ int regno, ret;
+
+ regno = drm_get_pci_index_reg(dev->devinfo,
+ map->offset, (uint_t)len, &regoff);
+ if (regno < 0) {
+ DRM_ERROR("drm_get_pci_index_reg() failed");
+ return (-EINVAL);
}
- mutex_enter(&mstate->mis_lock);
- mstate->mis_major = ddi_driver_major(dip);
- mstate->mis_dip = dip;
- mstate->mis_gfxp = gfxp;
- mstate->mis_agpm = agpm;
- mstate->mis_cfg_hdl = pci_cfg_handle;
- mstate->mis_devp = dp;
- mutex_exit(&mstate->mis_lock);
-
- /* create minor node for DRM access */
- (void) sprintf(buf, "%s%d", DRM_DEVNODE, instance);
- if (ddi_create_minor_node(dip, buf, S_IFCHR,
- INST2NODE2(instance), DDI_NT_DISPLAY_DRM, 0)) {
- DRM_ERROR("supp_regiter: faled to create minor node for drm");
- goto exit4;
+ ret = devmap_devmem_setup(dhp, dev->devinfo, NULL,
+ regno, (offset_t)regoff, len, PROT_ALL,
+ 0, &dev_attr);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("devmap_devmem_setup failed, ret=%d", ret);
+ return (-EFAULT);
}
- return ((void *)mstate);
+ *maplen = len;
+ return (0);
+}
-exit4:
- if ((dp->driver->use_agp) && agpm)
- agpmaster_detach(&agpm);
-exit3:
- pci_config_teardown(&pci_cfg_handle);
-exit2:
- (void) gfxp_vgatext_detach(dip, DDI_DETACH, gfxp);
-exit1:
- gfxp_vgatext_softc_free(gfxp);
- drm_supp_free_drv_entry(dip);
- ddi_remove_minor_node(dip, NULL);
+static int
+__devmap_shm(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t len, size_t *maplen)
+{
+ int ret;
- return (NULL);
-}
+ if (!map->umem_cookie)
+ return (-EINVAL);
+ len = ptob(btopr(map->size));
-int
-drm_supp_unregister(void *handle)
+ ret = devmap_umem_setup(dhp, dev->devinfo,
+ NULL, map->umem_cookie, 0, len, PROT_ALL,
+ IOMEM_DATA_CACHED, NULL);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("devmap_umem_setup failed, ret=%d", ret);
+ return (-EFAULT);
+ }
+
+ *maplen = len;
+ return (0);
+}
+
+static int
+__devmap_agp(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t len, size_t *maplen)
{
- drm_inst_list_t *list;
- drm_inst_state_t *mstate;
-
- list = (drm_inst_list_t *)handle;
- mstate = &list->disl_state;
- mutex_enter(&mstate->mis_lock);
-
- /* AGP master detach */
- if (mstate->mis_agpm != NULL)
- agpmaster_detach(&mstate->mis_agpm);
-
- /* free PCI config access handle */
- if (mstate->mis_cfg_hdl)
- pci_config_teardown(&mstate->mis_cfg_hdl);
-
- /* graphics misc module detach */
- if (mstate->mis_gfxp) {
- (void) gfxp_vgatext_detach(mstate->mis_dip, DDI_DETACH,
- mstate->mis_gfxp);
- gfxp_vgatext_softc_free(mstate->mis_gfxp);
+ int ret;
+
+ if (dev->agp == NULL) {
+		DRM_ERROR("attempted to mmap AGP "
+ "memory before AGP support is enabled");
+ return (-ENODEV);
}
- mstate->mis_devp = NULL;
+ len = ptob(btopr(len));
- /* remove all minor nodes */
- ddi_remove_minor_node(mstate->mis_dip, NULL);
- mutex_exit(&mstate->mis_lock);
- drm_supp_free_drv_entry(mstate->mis_dip);
+ ret = devmap_umem_setup(dhp, dev->devinfo,
+ &drm_devmap_callbacks, map->umem_cookie, 0, len, PROT_ALL,
+ IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("devmap_umem_setup() failed, ret=%d", ret);
+ return (-EFAULT);
+ }
- return (DDI_SUCCESS);
+ *maplen = len;
+ return (0);
}
+static int
+__devmap_sg(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t len, size_t *maplen)
+{
+ int ret;
+
+ len = ptob(btopr(len));
+ if (len > map->size) {
+ DRM_ERROR("offset=0x%lx, virtual=0x%p, "
+ "mapsize=0x%lx, len=0x%lx",
+ map->offset, dev->sg->virtual, map->size, len);
+ return (-EINVAL);
+ }
+
+ ret = devmap_umem_setup(dhp, dev->devinfo,
+ &drm_devmap_callbacks, map->umem_cookie, 0, len, PROT_ALL,
+ IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
+ if (ret != DDI_SUCCESS) {
+		DRM_ERROR("devmap_umem_setup() failed, ret=%d", ret);
+ return (-EFAULT);
+ }
+
+ *maplen = len;
+ return (0);
+}
-/*ARGSUSED*/
static int
-drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+__devmap_gem(struct drm_device *dev, devmap_cookie_t dhp,
+ struct drm_local_map *map, size_t *maplen)
{
- drm_inst_state_t *mstate;
- drm_cminor_t *mp, *newp;
- drm_device_t *dp;
- minor_t minor;
- int newminor;
- int instance;
- int err;
-
- mstate = drm_sup_devt_to_state(*devp);
- /*
- * return ENXIO for deferred attach so that system can
- * attach us again.
- */
- if (mstate == NULL)
- return (ENXIO);
+ struct devmap_callback_ctl *callbackops = NULL;
+ int ret;
- /*
- * The lest significant 15 bits are used for minor_number, and
- * the mid 3 bits are used for instance number. All minor numbers
- * are used as follows:
- * 0 -- gfx
- * 1 -- agpmaster
- * 2 -- drm
- * (3, MAX_CLONE_MINOR) -- drm minor node for clone open.
- */
- minor = DEV2MINOR(*devp);
- instance = DEV2INST(*devp);
- ASSERT(minor <= MAX_CLONE_MINOR);
+ if (map->callback == 1) {
+ callbackops = &drm_gem_map_ops;
+ }
+
+ if (!map->umem_cookie)
+ return (-EINVAL);
+
+ ret = gfxp_devmap_umem_setup(dhp, dev->devinfo, callbackops,
+ map->umem_cookie, 0, map->size, PROT_ALL,
+ IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP, &gem_dev_attr);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("gfxp_devmap_umem_setup failed, ret=%d", ret);
+ return (-EFAULT);
+ }
+
+ *maplen = map->size;
+ return (0);
+}
+
+static int
+drm_sun_open(dev_t *dev_id, int flag, int otyp, cred_t *credp)
+{
+ _NOTE(ARGUNUSED(otyp))
+
+ int minor_id = DRM_DEV2MINOR(*dev_id);
+ struct drm_minor *minor;
+ int clone_id;
+ int ret;
+
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
/*
* No operations for VGA & AGP mater devices, always return OK.
*/
- if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
return (0);
- /*
- * From here, we start to process drm
- */
-
- dp = mstate->mis_devp;
- if (!dp)
- return (ENXIO);
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (0);
/*
* Drm driver implements a software lock to serialize access
@@ -352,86 +492,79 @@ drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
* during open()'ing, and find corresponding process struct
* via minor number when close() is called.
*/
- newp = kmem_zalloc(sizeof (drm_cminor_t), KM_SLEEP);
- mutex_enter(&dp->dev_lock);
- for (newminor = DRM_MIN_CLONEMINOR; newminor < MAX_CLONE_MINOR;
- newminor ++) {
- TAILQ_FOREACH(mp, &dp->minordevs, link) {
- if (mp->minor == newminor)
- break;
- }
- if (mp == NULL)
- goto gotminor;
+ ret = idr_get_new_above(&minor->clone_idr, NULL, 0, &clone_id);
+ if (ret)
+ return (EMFILE);
+
+ if (clone_id > DRM_CLONEID_MAX) {
+ (void) idr_remove(&minor->clone_idr, clone_id);
+ return (EMFILE);
}
- mutex_exit(&dp->dev_lock);
- (void) kmem_free(newp, sizeof (drm_cminor_t));
- return (EMFILE);
-
-gotminor:
- TAILQ_INSERT_TAIL(&dp->minordevs, newp, link);
- newp->minor = newminor;
- mutex_exit(&dp->dev_lock);
- err = drm_open(dp, newp, flag, otyp, credp);
- if (err) {
- mutex_enter(&dp->dev_lock);
- TAILQ_REMOVE(&dp->minordevs, newp, link);
- (void) kmem_free(newp, sizeof (drm_cminor_t));
- mutex_exit(&dp->dev_lock);
-
- return (err);
+ ret = drm_open(minor, clone_id, flag, credp);
+ if (ret) {
+ (void) idr_remove(&minor->clone_idr, clone_id);
+ return (-ret);
}
- /* return a clone minor */
- newminor = newminor | (instance << NBITSMNODE);
- *devp = makedevice(getmajor(*devp), newminor);
- return (err);
+ *dev_id = DRM_MAKEDEV(getmajor(*dev_id), minor_id, clone_id);
+
+ return (-ret);
}
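
Every successful open gets its own clone id inside the minor's clone_idr,
and that id is encoded back into the dev_t, so later entry points can
recover the per-open drm_file without a global table.  A condensed sketch of
that lookup (example_clone_lookup() is hypothetical; it mirrors what
drm_sun_ioctl() and drm_sun_devmap() do below):

static int
example_clone_lookup(dev_t dev_id)
{
	struct drm_minor *minor;
	struct drm_file *file_priv;

	/* Recover the per-open state stashed by drm_sun_open(). */
	minor = idr_find(&drm_minors_idr, DRM_DEV2MINOR(dev_id));
	if (minor == NULL)
		return (ENODEV);

	file_priv = idr_find(&minor->clone_idr, DRM_DEV2CLONEID(dev_id));
	if (file_priv == NULL)
		return (EBADF);

	/* ... operate on file_priv exactly as the ioctl path does ... */
	return (0);
}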
-/*ARGSUSED*/
static int
-drm_sun_close(dev_t dev, int flag, int otyp, cred_t *credp)
+drm_sun_close(dev_t dev_id, int flag, int otyp, cred_t *credp)
{
- drm_inst_state_t *mstate;
- drm_device_t *dp;
- minor_t minor;
- int ret;
+ _NOTE(ARGUNUSED(flag, otyp, credp))
- mstate = drm_sup_devt_to_state(dev);
- if (mstate == NULL)
- return (EBADF);
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
+ int ret = 0;
+
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
- minor = DEV2MINOR(dev);
- ASSERT(minor <= MAX_CLONE_MINOR);
- if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
+ /*
+	 * No operations for VGA & AGP master devices, always return OK.
+ */
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
return (0);
- dp = mstate->mis_devp;
- if (dp == NULL) {
- DRM_ERROR("drm_sun_close: NULL soft state");
- return (ENXIO);
- }
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (0);
- ret = drm_close(dp, minor, flag, otyp, credp);
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
+
+ ret = drm_release(file_priv);
+ if (ret)
+ return (-ret);
+
+ (void) idr_remove(&minor->clone_idr, clone_id);
- return (ret);
+ return (0);
}
-/*ARGSUSED*/
static int
-drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
- cred_t *credp, int *rvalp)
+drm_sun_ioctl(dev_t dev_id, int cmd, intptr_t arg, int mode, cred_t *credp,
+ int *rvalp)
{
- extern drm_ioctl_desc_t drm_ioctls[];
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
- drm_inst_state_t *mstate;
- drm_device_t *dp;
- drm_ioctl_desc_t *ioctl;
- drm_ioctl_t *func;
- drm_file_t *fpriv;
- minor_t minor;
- int retval;
- int nr;
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
if (cmd == VIS_GETIDENTIFIER) {
if (ddi_copyout(&text_ident, (void *)arg,
@@ -439,572 +572,223 @@ drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
return (EFAULT);
}
- mstate = drm_sup_devt_to_state(dev);
- if (mstate == NULL) {
- return (EIO);
- }
-
- minor = DEV2MINOR(dev);
- ASSERT(minor <= MAX_CLONE_MINOR);
- switch (minor) {
- case GFX_MINOR:
- retval = gfxp_vgatext_ioctl(dev, cmd, arg,
- mode, credp, rvalp, mstate->mis_gfxp);
- return (retval);
-
- case AGPMASTER_MINOR:
- retval = agpmaster_ioctl(dev, cmd, arg, mode,
- credp, rvalp, mstate->mis_agpm);
- return (retval);
-
- case DRM_MINOR:
- default: /* DRM cloning minor nodes */
- break;
- }
-
- dp = mstate->mis_devp;
- ASSERT(dp != NULL);
-
- nr = DRM_IOCTL_NR(cmd);
- ioctl = &drm_ioctls[nr];
- atomic_inc_32(&dp->counts[_DRM_STAT_IOCTLS]);
-
- /* It's not a core DRM ioctl, try driver-specific. */
- if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
- /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
- nr -= DRM_COMMAND_BASE;
- if (nr > dp->driver->max_driver_ioctl) {
- DRM_ERROR("Bad driver ioctl number, 0x%x (of 0x%x)",
- nr, dp->driver->max_driver_ioctl);
- return (EINVAL);
- }
- ioctl = &dp->driver->driver_ioctls[nr];
- }
-
- func = ioctl->func;
- if (func == NULL) {
- return (ENOTSUP);
- }
-
- mutex_enter(&dp->dev_lock);
- fpriv = drm_find_file_by_proc(dp, credp);
- mutex_exit(&dp->dev_lock);
- if (fpriv == NULL) {
- DRM_ERROR("drm_sun_ioctl : can't find authenticator");
- return (EACCES);
- }
-
- if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
- ((ioctl->flags & DRM_AUTH) && !fpriv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !fpriv->master))
- return (EACCES);
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
+ return (gfxp_vgatext_ioctl(dev_id, cmd, arg, mode, credp,
+ rvalp, minor->private));
- fpriv->dev = dev;
- fpriv->credp = credp;
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (agpmaster_ioctl(dev_id, cmd, arg, mode, credp,
+ rvalp, minor->private));
- retval = func(dp, arg, fpriv, mode);
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
- return (retval);
+ return (-(drm_ioctl(dev_id, file_priv, cmd, arg, mode, credp)));
}
-/*ARGSUSED*/
static int
-drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
+drm_sun_devmap(dev_t dev_id, devmap_cookie_t dhp, offset_t offset,
size_t len, size_t *maplen, uint_t model)
{
- extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);
-
- drm_inst_state_t *mstate;
- drm_device_t *dp;
- ddi_umem_cookie_t cookie;
- drm_local_map_t *map = NULL;
- unsigned long aperbase;
- u_offset_t handle;
- offset_t koff;
- caddr_t kva;
- minor_t minor;
- size_t length;
- int ret;
-
- static ddi_device_acc_attr_t dev_attr = {
- DDI_DEVICE_ATTR_V0,
- DDI_NEVERSWAP_ACC,
- DDI_STRICTORDER_ACC,
- };
- static ddi_device_acc_attr_t gem_dev_attr = {
- DDI_DEVICE_ATTR_V0,
- DDI_NEVERSWAP_ACC,
- DDI_MERGING_OK_ACC
- };
-
- mstate = drm_sup_devt_to_state(dev);
- if (mstate == NULL)
- return (ENXIO);
-
- minor = DEV2MINOR(dev);
- switch (minor) {
- case GFX_MINOR:
- ret = gfxp_vgatext_devmap(dev, dhp, offset, len, maplen, model,
- mstate->mis_gfxp);
- return (ret);
-
- case AGPMASTER_MINOR:
+ struct drm_device *dev;
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
+ drm_local_map_t *map = NULL;
+
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
+
+ dev = minor->dev;
+
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
+ return (gfxp_vgatext_devmap(dev_id, dhp, offset, len,
+ maplen, model, minor->private));
+
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
return (ENOTSUP);
- case DRM_MINOR:
- break;
-
- default:
- /* DRM cloning nodes */
- if (minor > MAX_CLONE_MINOR)
- return (EBADF);
- break;
- }
-
-
- dp = mstate->mis_devp;
- if (dp == NULL) {
- DRM_ERROR("drm_sun_devmap: NULL soft state");
- return (EINVAL);
- }
-
- mutex_enter(&dp->dev_lock);
-
- if (dp->driver->use_gem == 1) {
- struct idr_list *entry;
- drm_cminor_t *mp;
-
- mp = drm_find_file_by_minor(dp, minor);
- if (!mp) {
- mutex_exit(&dp->dev_lock);
- DRM_ERROR("drm_sun_devmap: can't find authenticator");
- return (EACCES);
- }
-
- spin_lock(&dp->struct_mutex);
- idr_list_for_each(entry, &(mp->fpriv->object_idr)) {
- if ((uintptr_t)entry->obj == (u_offset_t)offset) {
- map = entry->obj->map;
- goto goon;
- }
- }
-goon:
- spin_unlock(&dp->struct_mutex);
- }
-
- if (map == NULL) {
- /*
- * We will solve 32-bit application on 64-bit kernel
- * issue later, now, we just use low 32-bit
- */
- handle = (u_offset_t)offset;
- handle &= 0xffffffff;
-
- TAILQ_FOREACH(map, &dp->maplist, link) {
- if (handle ==
- ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
- break;
- }
-
- /*
- * Temporarily, because offset is phys_addr for register
- * and framebuffer, is kernel virtual_addr for others
- * Maybe we will use hash table to solve this issue later.
- */
- if (map == NULL) {
- TAILQ_FOREACH(map, &dp->maplist, link) {
- if (handle == (map->offset & 0xffffffff))
- break;
- }
- }
- }
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
- if (map == NULL) {
- u_offset_t tmp;
-
- mutex_exit(&dp->dev_lock);
- cmn_err(CE_WARN, "Can't find map, offset=0x%llx, len=%x\n",
- offset, (int)len);
- cmn_err(CE_WARN, "Current mapping:\n");
- TAILQ_FOREACH(map, &dp->maplist, link) {
- tmp = (u_offset_t)((uintptr_t)map->handle) & 0xffffffff;
- cmn_err(CE_WARN, "map(handle=0x%p, size=0x%lx,type=%d,"
- "offset=0x%lx), handle=%llx, tmp=%lld", map->handle,
- map->size, map->type, map->offset, handle, tmp);
- }
- return (-1);
+ mutex_enter(&dev->struct_mutex);
+ map = __find_local_map(dev, offset);
+ if (!map) {
+ mutex_exit(&dev->struct_mutex);
+ return (EFAULT);
}
if (map->flags & _DRM_RESTRICTED) {
- mutex_exit(&dp->dev_lock);
- cmn_err(CE_WARN, "restricted map\n");
- return (-1);
+ mutex_exit(&dev->struct_mutex);
+ return (ENOTSUP);
}
+ mutex_exit(&dev->struct_mutex);
- mutex_exit(&dp->dev_lock);
switch (map->type) {
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
- {
- int regno;
- off_t regoff;
-
- regno = drm_get_pci_index_reg(dp->dip,
- map->offset, (uint_t)len, &regoff);
- if (regno < 0) {
- DRM_ERROR("devmap: failed to get register"
- " offset=0x%llx, len=0x%x", handle, len);
- return (EINVAL);
- }
-
- ret = devmap_devmem_setup(dhp, dp->dip, NULL,
- regno, (offset_t)regoff, len, PROT_ALL,
- 0, &dev_attr);
- if (ret != 0) {
- *maplen = 0;
- DRM_ERROR("devmap: failed, regno=%d,type=%d,"
- " handle=0x%x, offset=0x%llx, len=0x%x",
- regno, map->type, handle, offset, len);
- return (ret);
- }
- *maplen = len;
- return (ret);
- }
+ return (__devmap_general(dev, dhp, map, len, maplen));
case _DRM_SHM:
- if (map->drm_umem_cookie == NULL)
- return (EINVAL);
- length = ptob(btopr(map->size));
- ret = devmap_umem_setup(dhp, dp->dip, NULL,
- map->drm_umem_cookie, 0, length,
- PROT_ALL, IOMEM_DATA_CACHED, NULL);
- if (ret != 0) {
- *maplen = 0;
- return (ret);
- }
- *maplen = length;
-
- return (DDI_SUCCESS);
+ return (__devmap_shm(dev, dhp, map, len, maplen));
case _DRM_AGP:
- if (dp->agp == NULL) {
- cmn_err(CE_WARN, "drm_sun_devmap: attempted to mmap AGP"
- "memory before AGP support is enabled");
- return (DDI_FAILURE);
- }
-
- aperbase = dp->agp->base;
- koff = map->offset - aperbase;
- length = ptob(btopr(len));
- kva = map->dev_addr;
- cookie = gfxp_umem_cookie_init(kva, length);
- if (cookie == NULL) {
- cmn_err(CE_WARN, "devmap:failed to get umem_cookie");
- return (DDI_FAILURE);
- }
-
- if ((ret = devmap_umem_setup(dhp, dp->dip,
- &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
- IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr)) < 0) {
- gfxp_umem_cookie_destroy(cookie);
- cmn_err(CE_WARN, "devmap:failed, retval=%d", ret);
- return (DDI_FAILURE);
- }
- *maplen = length;
- break;
+ return (__devmap_agp(dev, dhp, map, len, maplen));
case _DRM_SCATTER_GATHER:
- koff = map->offset - (unsigned long)(caddr_t)dp->sg->virtual;
- kva = map->dev_addr + koff;
- length = ptob(btopr(len));
- if (length > map->size) {
- cmn_err(CE_WARN, "offset=0x%lx, virtual=0x%p,"
- "mapsize=0x%lx,len=0x%lx", map->offset,
- dp->sg->virtual, map->size, len);
- return (DDI_FAILURE);
- }
- cookie = gfxp_umem_cookie_init(kva, length);
- if (cookie == NULL) {
- cmn_err(CE_WARN, "devmap:failed to get umem_cookie");
- return (DDI_FAILURE);
- }
- ret = devmap_umem_setup(dhp, dp->dip,
- &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
- IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
- if (ret != 0) {
- cmn_err(CE_WARN, "sun_devmap: umem_setup fail");
- gfxp_umem_cookie_destroy(cookie);
- return (DDI_FAILURE);
- }
- *maplen = length;
- break;
-
- case _DRM_TTM:
- if (map->drm_umem_cookie == NULL)
- return (EINVAL);
-
- if (gfxp_devmap_umem_setup(dhp, dp->dip,
- NULL, map->drm_umem_cookie, 0, map->size, PROT_ALL,
- IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP,
- &gem_dev_attr)) {
- cmn_err(CE_WARN, "devmap:failed, retval=%d", ret);
- return (DDI_FAILURE);
- }
- *maplen = map->size;
- return (DDI_SUCCESS);
+ return (__devmap_sg(dev, dhp, map, len, maplen));
- default:
- return (DDI_FAILURE);
+ case _DRM_GEM:
+ return (__devmap_gem(dev, dhp, map, maplen));
}
- return (DDI_SUCCESS);
+ return (ENOTSUP);
}
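
Editorial note with a sketch: the register and framebuffer handling that the removed inline code performed (drm_get_pci_index_reg() followed by devmap_devmem_setup()) is now delegated to __devmap_general(). A minimal reconstruction of what such a helper could look like, assuming the same dev_attr access attributes and "drmP.h" types used by the old code, is shown below; it is not the helper introduced by this change.

/* Hedged sketch only -- assumes "drmP.h" and the module-level dev_attr. */
static int
__devmap_general_sketch(struct drm_device *dev, devmap_cookie_t dhp,
    struct drm_local_map *map, size_t len, size_t *maplen)
{
	off_t regoff;
	int regno, ret;

	/* Translate the map's PCI offset into a register set number. */
	regno = drm_get_pci_index_reg(dev->devinfo, map->offset,
	    (uint_t)len, &regoff);
	if (regno < 0)
		return (EINVAL);

	/* Map the device memory, as the removed inline code did. */
	ret = devmap_devmem_setup(dhp, dev->devinfo, NULL, regno,
	    (offset_t)regoff, len, PROT_ALL, 0, &dev_attr);
	if (ret != 0) {
		*maplen = 0;
		return (ret);
	}
	*maplen = len;
	return (0);
}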
-/*ARGSUSED*/
static int
-drm_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags,
- offset_t offset, size_t len, void **new_priv)
+drm_sun_read(dev_t dev_id, struct uio *uiop, cred_t *credp)
{
- devmap_handle_t *dhp;
- drm_inst_state_t *statep;
- struct ddi_umem_cookie *cp;
+ _NOTE(ARGUNUSED(credp))
+
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
- statep = drm_sup_devt_to_state(dev);
- ASSERT(statep != NULL);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
/*
- * This driver only supports MAP_SHARED,
- * and doesn't support MAP_PRIVATE
+	 * Reads on the VGA text and AGP master nodes are no-ops; succeed.
*/
- if (flags & MAP_PRIVATE) {
- cmn_err(CE_WARN, "!DRM driver doesn't support MAP_PRIVATE");
- return (EINVAL);
- }
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
+ return (0);
- mutex_enter(&statep->dis_ctxlock);
- dhp = (devmap_handle_t *)dhc;
- cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
- cp->cook_refcnt = 1;
- mutex_exit(&statep->dis_ctxlock);
- *new_priv = statep;
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (0);
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
+
+ (void) drm_read(file_priv, uiop);
return (0);
}
-/*ARGSUSED*/
-static void
-drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
- devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
- void **new_pvtp2)
+static int
+drm_sun_chpoll(dev_t dev_id, short events, int anyyet, short *reventsp,
+ struct pollhead **phpp)
{
- devmap_handle_t *dhp;
- devmap_handle_t *ndhp;
- drm_inst_state_t *statep;
- struct ddi_umem_cookie *cp;
- struct ddi_umem_cookie *ncp;
+ struct drm_minor *minor;
+ struct drm_file *file_priv;
+ int minor_id = DRM_DEV2MINOR(dev_id);
+ int clone_id = DRM_DEV2CLONEID(dev_id);
- dhp = (devmap_handle_t *)dhc;
- statep = (drm_inst_state_t *)pvtp;
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (!minor)
+ return (ENODEV);
+ if (!minor->dev)
+ return (ENODEV);
- mutex_enter(&statep->dis_ctxlock);
- cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
- if (new_dhp1 != NULL) {
- ndhp = (devmap_handle_t *)new_dhp1;
- ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
- ncp->cook_refcnt ++;
- *new_pvtp1 = statep;
- ASSERT(ncp == cp);
- }
-
- if (new_dhp2 != NULL) {
- ndhp = (devmap_handle_t *)new_dhp2;
- ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
- ncp->cook_refcnt ++;
- *new_pvtp2 = statep;
- ASSERT(ncp == cp);
- }
-
- cp->cook_refcnt --;
- if (cp->cook_refcnt == 0) {
- gfxp_umem_cookie_destroy(dhp->dh_cookie);
- dhp->dh_cookie = NULL;
- }
- mutex_exit(&statep->dis_ctxlock);
-}
+ /*
+	 * Polling the VGA text and AGP master nodes is a no-op; succeed.
+ */
+ if (DRM_MINOR_IS_VGATEXT(minor_id))
+ return (0);
+ if (DRM_MINOR_IS_AGPMASTER(minor_id))
+ return (0);
-/*ARGSUSED*/
-static int
-drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
- void **new_pvtp)
-{
- devmap_handle_t *dhp;
- drm_inst_state_t *statep;
- struct ddi_umem_cookie *cp;
+ file_priv = idr_find(&minor->clone_idr, clone_id);
+ if (!file_priv)
+ return (EBADF);
- statep = (drm_inst_state_t *)pvtp;
- mutex_enter(&statep->dis_ctxlock);
- dhp = (devmap_handle_t *)dhc;
- cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
- cp->cook_refcnt ++;
- mutex_exit(&statep->dis_ctxlock);
- *new_pvtp = statep;
+ if (!anyyet) {
+ *phpp = &file_priv->drm_pollhead;
+ }
+ *reventsp = drm_poll(file_priv, events);
return (0);
}
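
The read and chpoll entry points above feed the DRM event queue to user space: drm_poll() reports readiness and drm_read() copies queued events (for example vblank and page-flip completions) into the caller's buffer. A minimal user-space sketch of that pattern follows; the device path is hypothetical and the event payload is left unparsed.

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Hedged user-space sketch; "/dev/drm0" is an illustrative path only. */
static int
wait_for_drm_event(void)
{
	char buf[1024];
	struct pollfd pfd;
	ssize_t n = -1;
	int fd;

	if ((fd = open("/dev/drm0", O_RDWR)) < 0)
		return (-1);

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* Block until the driver queues an event, then drain it. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		n = read(fd, buf, sizeof (buf));

	(void) close(fd);
	return (n > 0 ? 0 : -1);
}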
-int
-drm_dev_to_instance(dev_t dev)
-{
- return (DEV2INST(dev));
-}
-
/*
- * drm_supp_alloc_drv_entry()
- *
- * Description:
- * Create a DRM entry and add it into the instance list (drm_inst_head).
- * Note that we don't allow a duplicated entry
+ * Common device operations structure for all DRM drivers
*/
-static drm_inst_list_t *
-drm_supp_alloc_drv_entry(dev_info_t *dip)
-{
- drm_inst_list_t **plist;
- drm_inst_list_t *list;
- drm_inst_list_t *entry;
-
- /* protect the driver list */
- mutex_enter(&drm_inst_list_lock);
- plist = &drm_inst_head;
- list = *plist;
- while (list) {
- if (list->disl_state.mis_dip == dip) {
- mutex_exit(&drm_inst_list_lock);
- cmn_err(CE_WARN, "%s%d already registered",
- ddi_driver_name(dip), ddi_get_instance(dip));
- return (NULL);
- }
- plist = &list->disl_next;
- list = list->disl_next;
- }
-
- /* "dip" is not registered, create new one and add to list */
- entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
- *plist = entry;
- entry->disl_state.mis_dip = dip;
- mutex_init(&entry->disl_state.mis_lock, NULL, MUTEX_DRIVER, NULL);
- mutex_init(&entry->disl_state.dis_ctxlock, NULL, MUTEX_DRIVER, NULL);
- mutex_exit(&drm_inst_list_lock);
+struct cb_ops drm_cb_ops = {
+ drm_sun_open, /* cb_open */
+ drm_sun_close, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ drm_sun_read, /* cb_read */
+ nodev, /* cb_write */
+ drm_sun_ioctl, /* cb_ioctl */
+ drm_sun_devmap, /* cb_devmap */
+ nodev, /* cb_mmap */
+ NULL, /* cb_segmap */
+ drm_sun_chpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ 0, /* cb_stream */
+ D_NEW | D_MTSAFE | D_DEVMAP /* cb_flag */
+};
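
Because drm_cb_ops is provided by this common module, a client DRM driver can point its dev_ops at it instead of supplying its own character entry points. A minimal sketch follows, with hypothetical xx_getinfo/xx_attach/xx_detach routines standing in for the client driver's own; it assumes the usual <sys/conf.h>, <sys/ddi.h> and <sys/sunddi.h> includes.

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Hedged sketch of a client driver's dev_ops; the xx_* names are made up. */
extern struct cb_ops drm_cb_ops;

static int xx_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int xx_attach(dev_info_t *, ddi_attach_cmd_t);
static int xx_detach(dev_info_t *, ddi_detach_cmd_t);

static struct dev_ops xx_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	xx_getinfo,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	xx_attach,			/* devo_attach */
	xx_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	&drm_cb_ops,			/* devo_cb_ops */
	NULL,				/* devo_bus_ops */
	NULL,				/* devo_power */
	ddi_quiesce_not_supported	/* devo_quiesce */
};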
- return (entry);
+static struct modlmisc modlmisc = {
+ &mod_miscops, "DRM common interfaces"
+};
-} /* drm_supp_alloc_drv_entry */
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modlmisc, NULL
+};
-/*
- * drm_supp_free_drv_entry()
- */
-static void
-drm_supp_free_drv_entry(dev_info_t *dip)
+int
+_init(void)
{
- drm_inst_list_t *list;
- drm_inst_list_t **plist;
- drm_inst_state_t *mstate;
-
- /* protect the driver list */
- mutex_enter(&drm_inst_list_lock);
- plist = &drm_inst_head;
- list = *plist;
- while (list) {
- if (list->disl_state.mis_dip == dip) {
- *plist = list->disl_next;
- mstate = &list->disl_state;
- mutex_destroy(&mstate->mis_lock);
- mutex_destroy(&mstate->dis_ctxlock);
- kmem_free(list, sizeof (*list));
- mutex_exit(&drm_inst_list_lock);
- return;
- }
- plist = &list->disl_next;
- list = list->disl_next;
- }
- mutex_exit(&drm_inst_list_lock);
+ int ret;
-} /* drm_supp_free_drv_entry() */
+ ret = mod_install(&modlinkage);
+ if (ret)
+ return (ret);
-/*
- * drm_sup_devt_to_state()
- *
- * description:
- * Get the soft state of DRM instance by device number
- */
-static drm_inst_state_t *
-drm_sup_devt_to_state(dev_t dev)
+ return (drm_core_init());
+}
+
+int
+_fini(void)
{
- drm_inst_list_t *list;
- drm_inst_state_t *mstate;
- major_t major = getmajor(dev);
- int instance = DEV2INST(dev);
-
- mutex_enter(&drm_inst_list_lock);
- list = drm_inst_head;
- while (list) {
- mstate = &list->disl_state;
- mutex_enter(&mstate->mis_lock);
-
- if ((mstate->mis_major == major) &&
- (ddi_get_instance(mstate->mis_dip) == instance)) {
- mutex_exit(&mstate->mis_lock);
- mutex_exit(&drm_inst_list_lock);
- return (mstate);
- }
+ int ret;
- list = list->disl_next;
- mutex_exit(&mstate->mis_lock);
- }
+ ret = mod_remove(&modlinkage);
+ if (ret)
+ return (ret);
- mutex_exit(&drm_inst_list_lock);
- return (NULL);
+ drm_core_exit();
-} /* drm_sup_devt_to_state() */
+ return (0);
+}
int
-drm_supp_get_irq(void *handle)
+_info(struct modinfo *modinfop)
{
- drm_inst_list_t *list;
- drm_inst_state_t *mstate;
- int irq;
-
- list = (drm_inst_list_t *)handle;
- mstate = &list->disl_state;
- ASSERT(mstate != NULL);
- irq = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_ILINE);
- return (irq);
+ return (mod_info(&modlinkage, modinfop));
}
-int
-drm_supp_device_capability(void *handle, int capid)
+struct drm_local_map *
+drm_core_findmap(struct drm_device *dev, unsigned int token)
{
- drm_inst_list_t *list;
- drm_inst_state_t *mstate;
- uint8_t cap = 0;
- uint16_t caps_ptr;
-
- list = (drm_inst_list_t *)handle;
- mstate = &list->disl_state;
- ASSERT(mstate != NULL);
-
- /* has capabilities list ? */
- if ((pci_config_get16(mstate->mis_cfg_hdl, PCI_CONF_STAT) &
- PCI_CONF_CAP_MASK) == 0)
- return (NULL);
-
- caps_ptr = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_CAP_PTR);
- while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
- cap = pci_config_get32(mstate->mis_cfg_hdl, caps_ptr);
- if ((cap & PCI_CONF_CAPID_MASK) == capid)
- return (cap);
- caps_ptr = pci_config_get8(mstate->mis_cfg_hdl,
- caps_ptr + PCI_CAP_NEXT_PTR);
+ struct drm_map_list *_entry;
+
+ list_for_each_entry(_entry, struct drm_map_list, &dev->maplist, head) {
+ if (_entry->user_token == token)
+ return (_entry->map);
}
- return (0);
+ return (NULL);
}
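
drm_core_findmap() translates the user-visible map token (the offset user space hands to mmap) back into the kernel's drm_local_map by walking dev->maplist. A hedged usage sketch follows; the locking and error codes chosen here are assumptions rather than code taken from this commit.

/* Hedged usage sketch; assumes "drmP.h" for the DRM types. */
static int
lookup_map_sketch(struct drm_device *dev, unsigned int token,
    struct drm_local_map **map_out)
{
	struct drm_local_map *map;

	/* The maplist is normally walked under dev->struct_mutex. */
	mutex_enter(&dev->struct_mutex);
	map = drm_core_findmap(dev, token);
	mutex_exit(&dev->struct_mutex);

	if (map == NULL)
		return (EINVAL);
	if (map->flags & _DRM_RESTRICTED)
		return (EACCES);

	*map_out = map;
	return (0);
}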
diff --git a/usr/src/uts/common/io/drm/drm_sunmod.h b/usr/src/uts/common/io/drm/drm_sunmod.h
deleted file mode 100644
index 32cd5c0..0000000
--- a/usr/src/uts/common/io/drm/drm_sunmod.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-/*
- * Common misc module interfaces of DRM under Solaris
- */
-
-/*
- * I915 DRM Driver for Solaris
- *
- * This driver provides the hardware 3D acceleration support for Intel
- * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
- * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
- * means the kernel device driver in DRI.
- *
- * I915 driver is a device dependent driver only, it depends on a misc module
- * named drm for generic DRM operations.
- *
- * This driver also calls into gfx and agpmaster misc modules respectively for
- * generic graphics operations and AGP master device support.
- */
-
-#ifndef _SYS_DRM_SUNMOD_H_
-#define _SYS_DRM_SUNMOD_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/types.h>
-#include <sys/errno.h>
-#include <sys/conf.h>
-#include <sys/kmem.h>
-#include <sys/visual_io.h>
-#include <sys/font.h>
-#include <sys/fbio.h>
-#include <sys/ddi.h>
-#include <sys/sunddi.h>
-#include <sys/stat.h>
-#include <sys/file.h>
-#include <sys/open.h>
-#include <sys/modctl.h>
-#include <sys/vgareg.h>
-#include <sys/vgasubr.h>
-#include <sys/pci.h>
-#include <sys/kd.h>
-#include <sys/ddi_impldefs.h>
-#include <sys/sunldi.h>
-#include <sys/mkdev.h>
-#include <gfx_private.h>
-#include <sys/agpgart.h>
-#include <sys/agp/agpdefs.h>
-#include <sys/agp/agpmaster_io.h>
-#include "drmP.h"
-#include <sys/modctl.h>
-
-/*
- * dev_t of this driver looks consists of:
- *
- * major number with NBITSMAJOR bits
- * instance node number with NBITSINST bits
- * minor node number with NBITSMINOR - NBITSINST bits
- *
- * Each instance has at most 2^(NBITSMINOR - NBITSINST) minor nodes, the first
- * three are:
- * 0: gfx<instance number>, graphics common node
- * 1: agpmaster<instance number>, agpmaster node
- * 2: drm<instance number>, drm node
- */
-#define GFX_MINOR 0
-#define AGPMASTER_MINOR 1
-#define DRM_MINOR 2
-#define DRM_MIN_CLONEMINOR 3
-
-/*
- * Number of bits occupied by instance number in dev_t, currently maximum 8
- * instances are supported.
- */
-#define NBITSINST 3
-
-/* Number of bits occupied in dev_t by minor node */
-#define NBITSMNODE (18 - NBITSINST)
-
-/*
- * DRM use a "cloning" minor node mechanism to release lock on every close(2),
- * thus there will be a minor node for every open(2) operation. Here we give
- * the maximum DRM cloning minor node number.
- */
-#define MAX_CLONE_MINOR (1 << (NBITSMNODE) - 1)
-#define DEV2MINOR(dev) (getminor(dev) & ((1 << (NBITSMNODE)) - 1))
-#define DEV2INST(dev) (getminor(dev) >> NBITSMNODE)
-#define INST2NODE0(inst) ((inst) << NBITSMNODE)
-#define INST2NODE1(inst) (((inst) << NBITSMNODE) + AGPMASTER_MINOR)
-#define INST2NODE2(inst) (((inst) << NBITSMNODE) + DRM_MINOR)
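
Editorial aside on the deleted encoding above: with NBITSINST of 3, NBITSMNODE works out to 15 (18 - 3), so a worked example of the arithmetic looks like this (illustration only, not part of the original header):

#include <assert.h>

static void
old_minor_encoding_example(void)
{
	unsigned int nbitsmnode = 15;			/* 18 - NBITSINST */
	unsigned int minor = (1u << nbitsmnode) + 2;	/* INST2NODE2(1) */

	assert((minor >> nbitsmnode) == 1);		/* DEV2INST -> 1 */
	assert((minor & ((1u << nbitsmnode) - 1)) == 2); /* DEV2MINOR -> DRM_MINOR */
}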
-
-/* graphics name for the common graphics minor node */
-#define GFX_NAME "gfx"
-
-
-/*
- * softstate for DRM module
- */
-typedef struct drm_instance_state {
- kmutex_t mis_lock;
- kmutex_t dis_ctxlock;
- major_t mis_major;
- dev_info_t *mis_dip;
- drm_device_t *mis_devp;
- ddi_acc_handle_t mis_cfg_hdl;
- agp_master_softc_t *mis_agpm; /* agpmaster softstate ptr */
- gfxp_vgatext_softc_ptr_t mis_gfxp; /* gfx softstate */
-} drm_inst_state_t;
-
-
-struct drm_inst_state_list {
- drm_inst_state_t disl_state;
- struct drm_inst_state_list *disl_next;
-
-};
-typedef struct drm_inst_state_list drm_inst_list_t;
-
-
-/* Identifier of this driver */
-static struct vis_identifier text_ident = { "SUNWdrm" };
-static int drm_sun_open(dev_t *, int, int, cred_t *);
-static int drm_sun_close(dev_t, int, int, cred_t *);
-static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
-static int drm_sun_devmap(dev_t, devmap_cookie_t,
- offset_t, size_t, size_t *, uint_t);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SYS_DRM_SUNMOD_H_ */
diff --git a/usr/src/uts/common/io/drm/drm_sysfs.c b/usr/src/uts/common/io/drm/drm_sysfs.c
new file mode 100644
index 0000000..ac59805
--- /dev/null
+++ b/usr/src/uts/common/io/drm/drm_sysfs.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/visual_io.h>
+#include <sys/fbio.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/open.h>
+#include <sys/modctl.h>
+#include <sys/pci.h>
+#include <sys/kd.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/sunldi.h>
+#include <sys/mkdev.h>
+#include "drmP.h"
+#include <sys/agpgart.h>
+#include <sys/agp/agpdefs.h>
+#include <sys/agp/agpmaster_io.h>
+
+/**
+ * drm_sysfs_device_add - create the device node(s) backing a DRM minor
+ * @minor: DRM minor to be added
+ *
+ * Solaris counterpart of the Linux sysfs class-device registration: depending
+ * on the minor type, attach the AGP master or VGA text helpers and create the
+ * corresponding ddi minor node for the character driver.
+ */
+int drm_sysfs_device_add(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ gfxp_vgatext_softc_ptr_t gfxp;
+ int ret;
+
+ switch (minor->type) {
+ case DRM_MINOR_AGPMASTER:
+ ret = agpmaster_attach(dev->devinfo,
+ (agp_master_softc_t **)&minor->private,
+ dev->pdev->pci_cfg_acc_handle, minor->index);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("agpmaster_attach failed");
+ return (ret);
+ }
+ return (0);
+
+ case DRM_MINOR_VGATEXT:
+ /* Generic graphics initialization */
+ gfxp = gfxp_vgatext_softc_alloc();
+ ret = gfxp_vgatext_attach(dev->devinfo, DDI_ATTACH, gfxp);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("gfxp_vgatext_attach failed");
+ return (EFAULT);
+ }
+ minor->private = gfxp;
+
+ ret = ddi_create_minor_node(dev->devinfo,
+ minor->name, S_IFCHR, minor->index, DDI_NT_DISPLAY, NULL);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("ddi_create_minor_node failed");
+ return (EFAULT);
+ }
+ return (0);
+
+ case DRM_MINOR_LEGACY:
+ case DRM_MINOR_CONTROL:
+ case DRM_MINOR_RENDER:
+ ret = ddi_create_minor_node(dev->devinfo,
+ minor->name, S_IFCHR, minor->index, DDI_NT_DISPLAY_DRM, NULL);
+ if (ret != DDI_SUCCESS) {
+ DRM_ERROR("ddi_create_minor_node failed");
+ return (EFAULT);
+ }
+ return (0);
+ }
+
+ return (ENOTSUP);
+}
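
drm_sysfs_device_add() and drm_sysfs_device_remove() form the per-minor setup/teardown pair. A hedged sketch of how a caller might drive them follows; the helper names and the fields initialized here are assumptions, not code from this change.

/* Hedged caller sketch; minor->index and minor->name are assumed set up. */
static int
drm_minor_attach_sketch(struct drm_device *dev, struct drm_minor *minor,
    int type)
{
	minor->dev = dev;
	minor->type = type;
	return (drm_sysfs_device_add(minor));
}

static void
drm_minor_detach_sketch(struct drm_minor *minor)
{
	drm_sysfs_device_remove(minor);
	minor->dev = NULL;
}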
+
+/**
+ * drm_sysfs_device_remove - remove a DRM minor
+ * @minor: DRM minor to remove
+ *
+ * Unregisters and cleans up the per-minor state that was set up by a
+ * previous call to drm_sysfs_device_add().
+ */
+void drm_sysfs_device_remove(struct drm_minor *minor)
+{
+ switch (minor->type) {
+ case DRM_MINOR_AGPMASTER:
+ if (minor->private) {
+ agpmaster_detach(
+ (agp_master_softc_t **)&minor->private);
+ minor->private = NULL;
+ }
+ break;
+
+ case DRM_MINOR_VGATEXT:
+ if (minor->private) {
+ (void) gfxp_vgatext_detach(minor->dev->devinfo,
+ DDI_DETACH, minor->private);
+ gfxp_vgatext_softc_free(minor->private);
+ minor->private = NULL;
+ }
+
+	/* LINTED: intentional fall-through to remove the minor node */
+ case DRM_MINOR_LEGACY:
+ case DRM_MINOR_CONTROL:
+ case DRM_MINOR_RENDER:
+ ddi_remove_minor_node(minor->dev->devinfo, minor->name);
+ }
+}
diff --git a/usr/src/uts/common/io/drm/queue.h b/usr/src/uts/common/io/drm/queue.h
deleted file mode 100644
index 4994209..0000000
--- a/usr/src/uts/common/io/drm/queue.h
+++ /dev/null
@@ -1,585 +0,0 @@
-/* BEGIN CSTYLED */
-/*-
- * Copyright (c) 1991, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)queue.h 8.5 (Berkeley) 8/20/94
- * $FreeBSD: /repoman/r/ncvs/src/sys/sys/queue.h,v 1.66 2006/05/26 18:17:53 emaste Exp $
- */
-
-#ifndef _SYS_QUEUE_H_
-#define _SYS_QUEUE_H_
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-/*
- * This file defines four types of data structures: singly-linked lists,
- * singly-linked tail queues, lists and tail queues.
- *
- * A singly-linked list is headed by a single forward pointer. The elements
- * are singly linked for minimum space and pointer manipulation overhead at
- * the expense of O(n) removal for arbitrary elements. New elements can be
- * added to the list after an existing element or at the head of the list.
- * Elements being removed from the head of the list should use the explicit
- * macro for this purpose for optimum efficiency. A singly-linked list may
- * only be traversed in the forward direction. Singly-linked lists are ideal
- * for applications with large datasets and few or no removals or for
- * implementing a LIFO queue.
- *
- * A singly-linked tail queue is headed by a pair of pointers, one to the
- * head of the list and the other to the tail of the list. The elements are
- * singly linked for minimum space and pointer manipulation overhead at the
- * expense of O(n) removal for arbitrary elements. New elements can be added
- * to the list after an existing element, at the head of the list, or at the
- * end of the list. Elements being removed from the head of the tail queue
- * should use the explicit macro for this purpose for optimum efficiency.
- * A singly-linked tail queue may only be traversed in the forward direction.
- * Singly-linked tail queues are ideal for applications with large datasets
- * and few or no removals or for implementing a FIFO queue.
- *
- * A list is headed by a single forward pointer (or an array of forward
- * pointers for a hash table header). The elements are doubly linked
- * so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before
- * or after an existing element or at the head of the list. A list
- * may only be traversed in the forward direction.
- *
- * A tail queue is headed by a pair of pointers, one to the head of the
- * list and the other to the tail of the list. The elements are doubly
- * linked so that an arbitrary element can be removed without a need to
- * traverse the list. New elements can be added to the list before or
- * after an existing element, at the head of the list, or at the end of
- * the list. A tail queue may be traversed in either direction.
- *
- * For details on the use of these macros, see the queue(3) manual page.
- *
- *
- * SLIST LIST STAILQ TAILQ
- * _HEAD + + + +
- * _HEAD_INITIALIZER + + + +
- * _ENTRY + + + +
- * _INIT + + + +
- * _EMPTY + + + +
- * _FIRST + + + +
- * _NEXT + + + +
- * _PREV - - - +
- * _LAST - - + +
- * _FOREACH + + + +
- * _FOREACH_SAFE + + + +
- * _FOREACH_REVERSE - - - +
- * _FOREACH_REVERSE_SAFE - - - +
- * _INSERT_HEAD + + + +
- * _INSERT_BEFORE - + - +
- * _INSERT_AFTER + + + +
- * _INSERT_TAIL - - + +
- * _CONCAT - - + +
- * _REMOVE_HEAD + - + -
- * _REMOVE + + + +
- *
- */
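
The overview above reads more easily next to a concrete use, so here is a minimal TAILQ illustration (editorial sketch only; struct item is hypothetical and the macros are the ones defined in this header):

struct item {
	int value;
	TAILQ_ENTRY(item) link;		/* linkage used by the queue macros */
};

TAILQ_HEAD(item_list, item);

static void
tailq_example(struct item_list *head, struct item *a, struct item *b)
{
	struct item *ip;

	TAILQ_INIT(head);
	TAILQ_INSERT_TAIL(head, a, link);	/* append a */
	TAILQ_INSERT_HEAD(head, b, link);	/* prepend b */

	TAILQ_FOREACH(ip, head, link) {
		/* visits b, then a */
	}

	TAILQ_REMOVE(head, a, link);		/* O(1) unlink of a */
}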
-#ifdef QUEUE_MACRO_DEBUG
-/* Store the last 2 places the queue element or head was altered */
-struct qm_trace {
- char * lastfile;
- int lastline;
- char * prevfile;
- int prevline;
-};
-
-#define TRACEBUF struct qm_trace trace;
-#define TRASHIT(x) do {(x) = (void *)-1;} while (*"\0")
-
-#define QMD_TRACE_HEAD(head) do { \
- (head)->trace.prevline = (head)->trace.lastline; \
- (head)->trace.prevfile = (head)->trace.lastfile; \
- (head)->trace.lastline = __LINE__; \
- (head)->trace.lastfile = __FILE__; \
-} while (*"\0")
-
-#define QMD_TRACE_ELEM(elem) do { \
- (elem)->trace.prevline = (elem)->trace.lastline; \
- (elem)->trace.prevfile = (elem)->trace.lastfile; \
- (elem)->trace.lastline = __LINE__; \
- (elem)->trace.lastfile = __FILE__; \
-} while (*"\0")
-
-#else
-#define QMD_TRACE_ELEM(elem)
-#define QMD_TRACE_HEAD(head)
-#define TRACEBUF
-#define TRASHIT(x)
-#endif /* QUEUE_MACRO_DEBUG */
-
-/*
- * Singly-linked List declarations.
- */
-#define SLIST_HEAD(name, type) \
-struct name { \
- struct type *slh_first; /* first element */ \
-}
-
-#define SLIST_HEAD_INITIALIZER(head) \
- { NULL }
-
-#define SLIST_ENTRY(type) \
-struct { \
- struct type *sle_next; /* next element */ \
-}
-
-/*
- * Singly-linked List functions.
- */
-#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
-
-#define SLIST_FIRST(head) ((head)->slh_first)
-
-#define SLIST_FOREACH(var, head, field) \
- for ((var) = SLIST_FIRST((head)); \
- (var); \
- (var) = SLIST_NEXT((var), field))
-
-#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = SLIST_FIRST((head)); \
- (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
- for ((varp) = &SLIST_FIRST((head)); \
- ((var) = *(varp)) != NULL; \
- (varp) = &SLIST_NEXT((var), field))
-
-#define SLIST_INIT(head) do { \
- SLIST_FIRST((head)) = NULL; \
-} while (*"\0")
-
-#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
- SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
- SLIST_NEXT((slistelm), field) = (elm); \
-} while (*"\0")
-
-#define SLIST_INSERT_HEAD(head, elm, field) do { \
- SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
- SLIST_FIRST((head)) = (elm); \
-} while (*"\0")
-
-#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
-
-#define SLIST_REMOVE(head, elm, type, field) do { \
- if (SLIST_FIRST((head)) == (elm)) { \
- SLIST_REMOVE_HEAD((head), field); \
- } \
- else { \
- struct type *curelm = SLIST_FIRST((head)); \
- while (SLIST_NEXT(curelm, field) != (elm)) \
- curelm = SLIST_NEXT(curelm, field); \
- SLIST_NEXT(curelm, field) = \
- SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
- } \
- TRASHIT((elm)->field.sle_next); \
-} while (*"\0")
-
-#define SLIST_REMOVE_HEAD(head, field) do { \
- SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
-} while (*"\0")
-
-/*
- * Singly-linked Tail queue declarations.
- */
-#define STAILQ_HEAD(name, type) \
-struct name { \
- struct type *stqh_first;/* first element */ \
- struct type **stqh_last;/* addr of last next element */ \
-}
-
-#define STAILQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).stqh_first }
-
-#define STAILQ_ENTRY(type) \
-struct { \
- struct type *stqe_next; /* next element */ \
-}
-
-/*
- * Singly-linked Tail queue functions.
- */
-#define STAILQ_CONCAT(head1, head2) do { \
- if (!STAILQ_EMPTY((head2))) { \
- *(head1)->stqh_last = (head2)->stqh_first; \
- (head1)->stqh_last = (head2)->stqh_last; \
- STAILQ_INIT((head2)); \
- } \
-} while (*"\0")
-
-#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
-
-#define STAILQ_FIRST(head) ((head)->stqh_first)
-
-#define STAILQ_FOREACH(var, head, field) \
- for((var) = STAILQ_FIRST((head)); \
- (var); \
- (var) = STAILQ_NEXT((var), field))
-
-
-#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = STAILQ_FIRST((head)); \
- (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define STAILQ_INIT(head) do { \
- STAILQ_FIRST((head)) = NULL; \
- (head)->stqh_last = &STAILQ_FIRST((head)); \
-} while (*"\0")
-
-#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
- if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
- (head)->stqh_last = &STAILQ_NEXT((elm), field); \
- STAILQ_NEXT((tqelm), field) = (elm); \
-} while (*"\0")
-
-#define STAILQ_INSERT_HEAD(head, elm, field) do { \
- if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
- (head)->stqh_last = &STAILQ_NEXT((elm), field); \
- STAILQ_FIRST((head)) = (elm); \
-} while (*"\0")
-
-#define STAILQ_INSERT_TAIL(head, elm, field) do { \
- STAILQ_NEXT((elm), field) = NULL; \
- *(head)->stqh_last = (elm); \
- (head)->stqh_last = &STAILQ_NEXT((elm), field); \
-} while (*"\0")
-
-#define STAILQ_LAST(head, type, field) \
- (STAILQ_EMPTY((head)) ? \
- NULL : \
- ((struct type *)(void *) \
- ((char *)((head)->stqh_last) - __offsetof(struct type, field))))
-
-#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
-
-#define STAILQ_REMOVE(head, elm, type, field) do { \
- if (STAILQ_FIRST((head)) == (elm)) { \
- STAILQ_REMOVE_HEAD((head), field); \
- } \
- else { \
- struct type *curelm = STAILQ_FIRST((head)); \
- while (STAILQ_NEXT(curelm, field) != (elm)) \
- curelm = STAILQ_NEXT(curelm, field); \
- if ((STAILQ_NEXT(curelm, field) = \
- STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
- (head)->stqh_last = &STAILQ_NEXT((curelm), field);\
- } \
- TRASHIT((elm)->field.stqe_next); \
-} while (*"\0")
-
-#define STAILQ_REMOVE_HEAD(head, field) do { \
- if ((STAILQ_FIRST((head)) = \
- STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
- (head)->stqh_last = &STAILQ_FIRST((head)); \
-} while (*"\0")
-
-#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
- if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
- (head)->stqh_last = &STAILQ_FIRST((head)); \
-} while (*"\0")
-
-/*
- * List declarations.
- */
-#define LIST_HEAD(name, type) \
-struct name { \
- struct type *lh_first; /* first element */ \
-}
-
-#define LIST_HEAD_INITIALIZER(head) \
- { NULL }
-
-#define LIST_ENTRY(type) \
-struct { \
- struct type *le_next; /* next element */ \
- struct type **le_prev; /* address of previous next element */ \
-}
-
-/*
- * List functions.
- */
-
-#if (defined(_KERNEL) && defined(INVARIANTS))
-#define QMD_LIST_CHECK_HEAD(head, field) do { \
- if (LIST_FIRST((head)) != NULL && \
- LIST_FIRST((head))->field.le_prev != \
- &LIST_FIRST((head))) \
- panic("Bad list head %p first->prev != head", (head)); \
-} while (*"\0")
-
-#define QMD_LIST_CHECK_NEXT(elm, field) do { \
- if (LIST_NEXT((elm), field) != NULL && \
- LIST_NEXT((elm), field)->field.le_prev != \
- &((elm)->field.le_next)) \
- panic("Bad link elm %p next->prev != elm", (elm)); \
-} while (*"\0")
-
-#define QMD_LIST_CHECK_PREV(elm, field) do { \
- if (*(elm)->field.le_prev != (elm)) \
- panic("Bad link elm %p prev->next != elm", (elm)); \
-} while (*"\0")
-#else
-#define QMD_LIST_CHECK_HEAD(head, field)
-#define QMD_LIST_CHECK_NEXT(elm, field)
-#define QMD_LIST_CHECK_PREV(elm, field)
-#endif /* (_KERNEL && INVARIANTS) */
-
-#define LIST_EMPTY(head) ((head)->lh_first == NULL)
-
-#define LIST_FIRST(head) ((head)->lh_first)
-
-#define LIST_FOREACH(var, head, field) \
- for ((var) = LIST_FIRST((head)); \
- (var); \
- (var) = LIST_NEXT((var), field))
-
-#define LIST_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = LIST_FIRST((head)); \
- (var) && ((tvar) = LIST_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define LIST_INIT(head) do { \
- LIST_FIRST((head)) = NULL; \
-} while (*"\0")
-
-#define LIST_INSERT_AFTER(listelm, elm, field) do { \
- QMD_LIST_CHECK_NEXT(listelm, field); \
- if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
- LIST_NEXT((listelm), field)->field.le_prev = \
- &LIST_NEXT((elm), field); \
- LIST_NEXT((listelm), field) = (elm); \
- (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
-} while (*"\0")
-
-#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
- QMD_LIST_CHECK_PREV(listelm, field); \
- (elm)->field.le_prev = (listelm)->field.le_prev; \
- LIST_NEXT((elm), field) = (listelm); \
- *(listelm)->field.le_prev = (elm); \
- (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
-} while (*"\0")
-
-#define LIST_INSERT_HEAD(head, elm, field) do { \
- QMD_LIST_CHECK_HEAD((head), field); \
- if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
- LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
- LIST_FIRST((head)) = (elm); \
- (elm)->field.le_prev = &LIST_FIRST((head)); \
-} while (*"\0")
-
-#define LIST_NEXT(elm, field) ((elm)->field.le_next)
-
-#define LIST_REMOVE(elm, field) do { \
- QMD_LIST_CHECK_NEXT(elm, field); \
- QMD_LIST_CHECK_PREV(elm, field); \
- if (LIST_NEXT((elm), field) != NULL) \
- LIST_NEXT((elm), field)->field.le_prev = \
- (elm)->field.le_prev; \
- *(elm)->field.le_prev = LIST_NEXT((elm), field); \
- TRASHIT((elm)->field.le_next); \
- TRASHIT((elm)->field.le_prev); \
-} while (*"\0")
-
-/*
- * Tail queue declarations.
- */
-#define TAILQ_HEAD(name, type) \
-struct name { \
- struct type *tqh_first; /* first element */ \
- struct type **tqh_last; /* addr of last next element */ \
- TRACEBUF \
-}
-
-#define TAILQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).tqh_first }
-
-#define TAILQ_ENTRY(type) \
-struct { \
- struct type *tqe_next; /* next element */ \
- struct type **tqe_prev; /* address of previous next element */ \
- TRACEBUF \
-}
-
-/*
- * Tail queue functions.
- */
-#if (defined(_KERNEL) && defined(INVARIANTS))
-#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
- if (!TAILQ_EMPTY(head) && \
- TAILQ_FIRST((head))->field.tqe_prev != \
- &TAILQ_FIRST((head))) \
- panic("Bad tailq head %p first->prev != head", (head)); \
-} while (*"\0")
-
-#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
- if (*(head)->tqh_last != NULL) \
- panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \
-} while (*"\0")
-
-#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
- if (TAILQ_NEXT((elm), field) != NULL && \
- TAILQ_NEXT((elm), field)->field.tqe_prev != \
- &((elm)->field.tqe_next)) \
- panic("Bad link elm %p next->prev != elm", (elm)); \
-} while (*"\0")
-
-#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
- if (*(elm)->field.tqe_prev != (elm)) \
- panic("Bad link elm %p prev->next != elm", (elm)); \
-} while (*"\0")
-#else
-#define QMD_TAILQ_CHECK_HEAD(head, field)
-#define QMD_TAILQ_CHECK_TAIL(head, headname)
-#define QMD_TAILQ_CHECK_NEXT(elm, field)
-#define QMD_TAILQ_CHECK_PREV(elm, field)
-#endif /* (_KERNEL && INVARIANTS) */
-
-#define TAILQ_CONCAT(head1, head2, field) do { \
- if (!TAILQ_EMPTY(head2)) { \
- *(head1)->tqh_last = (head2)->tqh_first; \
- (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
- (head1)->tqh_last = (head2)->tqh_last; \
- TAILQ_INIT((head2)); \
- QMD_TRACE_HEAD(head1); \
- QMD_TRACE_HEAD(head2); \
- } \
-} while (*"\0")
-
-#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
-
-#define TAILQ_FIRST(head) ((head)->tqh_first)
-
-#define TAILQ_FOREACH(var, head, field) \
- for ((var) = TAILQ_FIRST((head)); \
- (var); \
- (var) = TAILQ_NEXT((var), field))
-
-#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
- for ((var) = TAILQ_FIRST((head)); \
- (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
- (var) = (tvar))
-
-#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
- for ((var) = TAILQ_LAST((head), headname); \
- (var); \
- (var) = TAILQ_PREV((var), headname, field))
-
-#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
- for ((var) = TAILQ_LAST((head), headname); \
- (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
- (var) = (tvar))
-
-#define TAILQ_INIT(head) do { \
- TAILQ_FIRST((head)) = NULL; \
- (head)->tqh_last = &TAILQ_FIRST((head)); \
- QMD_TRACE_HEAD(head); \
-} while (*"\0")
-
-#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
- QMD_TAILQ_CHECK_NEXT(listelm, field); \
- if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
- TAILQ_NEXT((elm), field)->field.tqe_prev = \
- &TAILQ_NEXT((elm), field); \
- else { \
- (head)->tqh_last = &TAILQ_NEXT((elm), field); \
- QMD_TRACE_HEAD(head); \
- } \
- TAILQ_NEXT((listelm), field) = (elm); \
- (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
- QMD_TRACE_ELEM(&(elm)->field); \
- QMD_TRACE_ELEM(&listelm->field); \
-} while (*"\0")
-
-#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
- QMD_TAILQ_CHECK_PREV(listelm, field); \
- (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
- TAILQ_NEXT((elm), field) = (listelm); \
- *(listelm)->field.tqe_prev = (elm); \
- (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
- QMD_TRACE_ELEM(&(elm)->field); \
- QMD_TRACE_ELEM(&listelm->field); \
-} while (*"\0")
-
-#define TAILQ_INSERT_HEAD(head, elm, field) do { \
- QMD_TAILQ_CHECK_HEAD(head, field); \
- if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
- TAILQ_FIRST((head))->field.tqe_prev = \
- &TAILQ_NEXT((elm), field); \
- else \
- (head)->tqh_last = &TAILQ_NEXT((elm), field); \
- TAILQ_FIRST((head)) = (elm); \
- (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
- QMD_TRACE_HEAD(head); \
- QMD_TRACE_ELEM(&(elm)->field); \
-} while (*"\0")
-
-#define TAILQ_INSERT_TAIL(head, elm, field) do { \
- QMD_TAILQ_CHECK_TAIL(head, field); \
- TAILQ_NEXT((elm), field) = NULL; \
- (elm)->field.tqe_prev = (head)->tqh_last; \
- *(head)->tqh_last = (elm); \
- (head)->tqh_last = &TAILQ_NEXT((elm), field); \
- QMD_TRACE_HEAD(head); \
- QMD_TRACE_ELEM(&(elm)->field); \
-} while (*"\0")
-
-#define TAILQ_LAST(head, headname) \
- (*(((struct headname *)((head)->tqh_last))->tqh_last))
-
-#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
-
-#define TAILQ_PREV(elm, headname, field) \
- (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
-
-#define TAILQ_REMOVE(head, elm, field) do { \
- QMD_TAILQ_CHECK_NEXT(elm, field); \
- QMD_TAILQ_CHECK_PREV(elm, field); \
- if ((TAILQ_NEXT((elm), field)) != NULL) \
- TAILQ_NEXT((elm), field)->field.tqe_prev = \
- (elm)->field.tqe_prev; \
- else { \
- (head)->tqh_last = (elm)->field.tqe_prev; \
- QMD_TRACE_HEAD(head); \
- } \
- *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
- TRASHIT((elm)->field.tqe_next); \
- TRASHIT((elm)->field.tqe_prev); \
- QMD_TRACE_ELEM(&(elm)->field); \
-} while (*"\0")
-
-
-#ifdef _KERNEL
-
-#endif /* _KERNEL */
-
-#endif /* !_SYS_QUEUE_H_ */
-
-/* END CSTYLED */