
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull DRM updates from Dave Airlie:
 "This is the one and only next pull for 3.8, we had a regression we
  found last week, so I was waiting for that to resolve itself, and I
  ended up with some Intel fixes on top as well.

  Highlights:
   - new driver: nvidia tegra 20/30/hdmi support
   - radeon: add support for previously unused DMA engines, more HDMI
     regs, eviction speed-ups and fixes
   - i915: HSW support enable, agp removal on GEN6, seqno wrapping
   - exynos: IPP subsystem support (image post proc), HDMI
   - nouveau: display class reworking, nv20->40 z compression
   - ttm: start of locking fixes, rcu usage for lookups
   - core: documentation updates, docbook integration, monotonic clock
     usage, move from connector to object properties"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (590 commits)
  drm/exynos: add gsc ipp driver
  drm/exynos: add rotator ipp driver
  drm/exynos: add fimc ipp driver
  drm/exynos: add iommu support for ipp
  drm/exynos: add ipp subsystem
  drm/exynos: support device tree for fimd
  radeon: fix regression with eviction since evict caching changes
  drm/radeon: add more pedantic checks in the CP DMA checker
  drm/radeon: bump version for CS ioctl support for async DMA
  drm/radeon: enable the async DMA rings in the CS ioctl
  drm/radeon: add VM CS parser support for async DMA on cayman/TN/SI
  drm/radeon/kms: add evergreen/cayman CS parser for async DMA (v2)
  drm/radeon/kms: add 6xx/7xx CS parser for async DMA (v2)
  drm/radeon: fix htile buffer size computation for command stream checker
  drm/radeon: fix fence locking in the pageflip callback
  drm/radeon: make indirect register access concurrency-safe
  drm/radeon: add W|RREG32_IDX for MM_INDEX|DATA based mmio accesss
  drm/exynos: support extended screen coordinate of fimd
  drm/exynos: fix x, y coordinates for right bottom pixel
  drm/exynos: fix fb offset calculation for plane
  ...
Committed by Linus Torvalds on 2012-12-17 08:26:17 -08:00 (commit 3c2e81ef34)
364 changed files with 39100 additions and 15045 deletions


@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
 dma_unmap_{single,page,sg} functions family to force buffer to stay in
 device domain after releasing a mapping for it. Use this attribute with
 care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default DMA-mapping subsystem is allowed to assemble the buffer
+allocated by dma_alloc_attrs() function from individual pages if it can
+be mapped as contiguous chunk into device dma address space. By
+specifying this attribute the allocated buffer is forced to be contiguous
+also in physical memory.
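
As a usage illustration (not part of the patch), here is a minimal sketch of
how a driver might request such a buffer; the helper name alloc_phys_contig()
and the GFP flags are illustrative assumptions, using the struct dma_attrs
API of this kernel generation:

    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>

    /* Illustrative helper: allocate a buffer that is physically
     * contiguous, not merely contiguous in the device's DMA address
     * space. */
    static void *alloc_phys_contig(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
            return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
                                   &attrs);
    }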


@@ -1141,23 +1141,13 @@ int max_width, max_height;</synopsis>
     the <methodname>page_flip</methodname> operation will be called with a
     non-NULL <parameter>event</parameter> argument pointing to a
     <structname>drm_pending_vblank_event</structname> instance. Upon page
-    flip completion the driver must fill the
-    <parameter>event</parameter>::<structfield>event</structfield>
-    <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
-    and <structfield>tv_usec</structfield> fields with the associated
-    vertical blanking count and timestamp, add the event to the
-    <parameter>drm_file</parameter> list of events to be signaled, and wake
-    up any waiting process. This can be performed with
+    flip completion the driver must call <methodname>drm_send_vblank_event</methodname>
+    to fill in the event and send to wake up any waiting processes.
+    This can be performed with
     <programlisting><![CDATA[
-    struct timeval now;
-
-    event->event.sequence = drm_vblank_count_and_time(..., &now);
-    event->event.tv_sec = now.tv_sec;
-    event->event.tv_usec = now.tv_usec;
-
     spin_lock_irqsave(&dev->event_lock, flags);
-    list_add_tail(&event->base.link, &event->base.file_priv->event_list);
-    wake_up_interruptible(&event->base.file_priv->event_wait);
+    ...
+    drm_send_vblank_event(dev, pipe, event);
     spin_unlock_irqrestore(&dev->event_lock, flags);
     ]]></programlisting>
   </para>

@@ -1621,10 +1611,10 @@ void intel_crt_init(struct drm_device *dev)
     </sect2>
   </sect1>

-  <!-- Internals: mid-layer helper functions -->
+  <!-- Internals: kms helper functions -->

   <sect1>
-    <title>Mid-layer Helper Functions</title>
+    <title>Mode Setting Helper Functions</title>
     <para>
       The CRTC, encoder and connector functions provided by the drivers
       implement the DRM API. They're called by the DRM core and ioctl handlers

@@ -2106,6 +2096,21 @@ void intel_crt_init(struct drm_device *dev)
         </listitem>
       </itemizedlist>
     </sect2>
+    <sect2>
+      <title>Modeset Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_crtc_helper.c
+    </sect2>
+    <sect2>
+      <title>fbdev Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
+!Edrivers/gpu/drm/drm_fb_helper.c
+    </sect2>
+    <sect2>
+      <title>Display Port Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
+!Iinclude/drm/drm_dp_helper.h
+!Edrivers/gpu/drm/drm_dp_helper.c
+    </sect2>
   </sect1>

   <!-- Internals: vertical blanking -->


@ -0,0 +1,191 @@
NVIDIA Tegra host1x
Required properties:
- compatible: "nvidia,tegra<chip>-host1x"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- #address-cells: The number of cells used to represent physical base addresses
in the host1x address space. Should be 1.
- #size-cells: The number of cells used to represent the size of an address
range in the host1x address space. Should be 1.
- ranges: The mapping of the host1x address space to the CPU address space.
The host1x top-level node defines a number of children, each representing one
of the following host1x client modules:
- mpe: video encoder
Required properties:
- compatible: "nvidia,tegra<chip>-mpe"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- vi: video input
Required properties:
- compatible: "nvidia,tegra<chip>-vi"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- epp: encoder pre-processor
Required properties:
- compatible: "nvidia,tegra<chip>-epp"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- isp: image signal processor
Required properties:
- compatible: "nvidia,tegra<chip>-isp"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- gr2d: 2D graphics engine
Required properties:
- compatible: "nvidia,tegra<chip>-gr2d"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- gr3d: 3D graphics engine
Required properties:
- compatible: "nvidia,tegra<chip>-gr3d"
- reg: Physical base address and length of the controller's registers.
- dc: display controller
Required properties:
- compatible: "nvidia,tegra<chip>-dc"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
Each display controller node has a child node, named "rgb", that represents
the RGB output associated with the controller. It can take the following
optional properties:
- nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
- nvidia,edid: supplies a binary EDID blob
- hdmi: High Definition Multimedia Interface
Required properties:
- compatible: "nvidia,tegra<chip>-hdmi"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- vdd-supply: regulator for supply voltage
- pll-supply: regulator for PLL
Optional properties:
- nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
- nvidia,edid: supplies a binary EDID blob
- tvo: TV encoder output
Required properties:
- compatible: "nvidia,tegra<chip>-tvo"
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- dsi: display serial interface
Required properties:
- compatible: "nvidia,tegra<chip>-dsi"
- reg: Physical base address and length of the controller's registers.
Example:
/ {
...
host1x {
compatible = "nvidia,tegra20-host1x", "simple-bus";
reg = <0x50000000 0x00024000>;
interrupts = <0 65 0x04 /* mpcore syncpt */
0 67 0x04>; /* mpcore general */
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x54000000 0x54000000 0x04000000>;
mpe {
compatible = "nvidia,tegra20-mpe";
reg = <0x54040000 0x00040000>;
interrupts = <0 68 0x04>;
};
vi {
compatible = "nvidia,tegra20-vi";
reg = <0x54080000 0x00040000>;
interrupts = <0 69 0x04>;
};
epp {
compatible = "nvidia,tegra20-epp";
reg = <0x540c0000 0x00040000>;
interrupts = <0 70 0x04>;
};
isp {
compatible = "nvidia,tegra20-isp";
reg = <0x54100000 0x00040000>;
interrupts = <0 71 0x04>;
};
gr2d {
compatible = "nvidia,tegra20-gr2d";
reg = <0x54140000 0x00040000>;
interrupts = <0 72 0x04>;
};
gr3d {
compatible = "nvidia,tegra20-gr3d";
reg = <0x54180000 0x00040000>;
};
dc@54200000 {
compatible = "nvidia,tegra20-dc";
reg = <0x54200000 0x00040000>;
interrupts = <0 73 0x04>;
rgb {
status = "disabled";
};
};
dc@54240000 {
compatible = "nvidia,tegra20-dc";
reg = <0x54240000 0x00040000>;
interrupts = <0 74 0x04>;
rgb {
status = "disabled";
};
};
hdmi {
compatible = "nvidia,tegra20-hdmi";
reg = <0x54280000 0x00040000>;
interrupts = <0 75 0x04>;
status = "disabled";
};
tvo {
compatible = "nvidia,tegra20-tvo";
reg = <0x542c0000 0x00040000>;
interrupts = <0 76 0x04>;
status = "disabled";
};
dsi {
compatible = "nvidia,tegra20-dsi";
reg = <0x54300000 0x00040000>;
status = "disabled";
};
};
...
};


@@ -213,3 +213,91 @@ presentation on krefs, which can be found at:
 and:
   http://www.kroah.com/linux/talks/ols_2004_kref_talk/
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+        struct my_data *entry = NULL;
+        mutex_lock(&mutex);
+        if (!list_empty(&q)) {
+                entry = container_of(q.next, struct my_data, link);
+                if (!kref_get_unless_zero(&entry->refcount))
+                        entry = NULL;
+        }
+        mutex_unlock(&mutex);
+        return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+        struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+        mutex_lock(&mutex);
+        list_del(&entry->link);
+        mutex_unlock(&mutex);
+        kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+        kref_put(&entry->refcount, release_entry);
+}
+
+This is useful for removing the mutex lock around kref_put() in put_entry(),
+but it is important that kref_get_unless_zero is enclosed in the same
+critical section that finds the entry in the lookup table; otherwise
+kref_get_unless_zero may reference already freed memory.
+
+Note that it is illegal to use kref_get_unless_zero without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
+
+The function kref_get_unless_zero also makes it possible to use rcu
+locking for lookups in the above example:
+
+struct my_data
+{
+        struct rcu_head rhead;
+        .
+        struct kref refcount;
+        .
+        .
+};
+
+static struct my_data *get_entry_rcu()
+{
+        struct my_data *entry = NULL;
+        rcu_read_lock();
+        if (!list_empty(&q)) {
+                entry = container_of(q.next, struct my_data, link);
+                if (!kref_get_unless_zero(&entry->refcount))
+                        entry = NULL;
+        }
+        rcu_read_unlock();
+        return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+        struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+        mutex_lock(&mutex);
+        list_del_rcu(&entry->link);
+        mutex_unlock(&mutex);
+        kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+        kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for an
+rcu grace period after release_entry_rcu was called. That can be accomplished
+by using kfree_rcu(entry, rhead) as done above, or by calling
+synchronize_rcu() before using kfree, but note that synchronize_rcu() may
+sleep for a substantial amount of time.
+
+Thomas Hellstrom <thellstrom@vmware.com>
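
Two small illustrations of the points above (editor's sketches, not part of
the patch). First, the return-value rule for kref_get_unless_zero();
do_something_with() is a hypothetical stand-in for any use of the object:

    /* Wrong: the return value is ignored, so the object may be used
     * without a reference actually having been taken. */
    kref_get_unless_zero(&entry->refcount);

    /* Right: only touch the object when the reference was obtained. */
    if (kref_get_unless_zero(&entry->refcount))
            do_something_with(entry);

    /* If entry is already known to be valid (a reference is held),
     * take the additional reference unconditionally. */
    kref_get(&entry->refcount);

Second, a sketch of the synchronize_rcu() variant of release_entry_rcu()
mentioned above, under the same assumptions as the RCU example:

    static void release_entry_rcu(struct kref *ref)
    {
            struct my_data *entry = container_of(ref, struct my_data,
                                                 refcount);

            mutex_lock(&mutex);
            list_del_rcu(&entry->link);
            mutex_unlock(&mutex);

            /* Wait for all RCU readers to finish before freeing; unlike
             * kfree_rcu() this may sleep for a substantial amount of
             * time. */
            synchronize_rcu();
            kfree(entry);
    }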


@@ -2549,6 +2549,15 @@ S:	Supported
 F:	drivers/gpu/drm/exynos
 F:	include/drm/exynos*

+DRM DRIVERS FOR NVIDIA TEGRA
+M:	Thierry Reding <thierry.reding@avionic-design.de>
+L:	dri-devel@lists.freedesktop.org
+L:	linux-tegra@vger.kernel.org
+T:	git git://gitorious.org/thierryreding/linux.git
+S:	Maintained
+F:	drivers/gpu/drm/tegra/
+F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+
 DSCC4 DRIVER
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org


@@ -1034,7 +1034,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
         spin_unlock_irqrestore(&mapping->lock, flags);
 }

-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+                                          gfp_t gfp, struct dma_attrs *attrs)
 {
         struct page **pages;
         int count = size >> PAGE_SHIFT;

@@ -1048,6 +1049,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
         if (!pages)
                 return NULL;

+        if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+        {
+                unsigned long order = get_order(size);
+                struct page *page;
+
+                page = dma_alloc_from_contiguous(dev, count, order);
+                if (!page)
+                        goto error;
+
+                __dma_clear_buffer(page, size);
+
+                for (i = 0; i < count; i++)
+                        pages[i] = page + i;
+
+                return pages;
+        }
+
         while (count) {
                 int j, order = __fls(count);

@@ -1081,14 +1099,21 @@ error:
         return NULL;
 }

-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+                               size_t size, struct dma_attrs *attrs)
 {
         int count = size >> PAGE_SHIFT;
         int array_size = count * sizeof(struct page *);
         int i;
-        for (i = 0; i < count; i++)
-                if (pages[i])
-                        __free_pages(pages[i], 0);
+
+        if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+                dma_release_from_contiguous(dev, pages[0], count);
+        } else {
+                for (i = 0; i < count; i++)
+                        if (pages[i])
+                                __free_pages(pages[i], 0);
+        }
+
         if (array_size <= PAGE_SIZE)
                 kfree(pages);
         else

@@ -1250,7 +1275,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
         if (gfp & GFP_ATOMIC)
                 return __iommu_alloc_atomic(dev, size, handle);

-        pages = __iommu_alloc_buffer(dev, size, gfp);
+        pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
         if (!pages)
                 return NULL;

@@ -1271,7 +1296,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 err_mapping:
         __iommu_remove_mapping(dev, *handle, size);
 err_buffer:
-        __iommu_free_buffer(dev, pages, size);
+        __iommu_free_buffer(dev, pages, size, attrs);
         return NULL;
 }

@@ -1327,7 +1352,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
         }

         __iommu_remove_mapping(dev, handle, size);
-        __iommu_free_buffer(dev, pages, size);
+        __iommu_free_buffer(dev, pages, size, attrs);
 }

 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,


@@ -62,12 +62,6 @@
 #define I810_PTE_LOCAL          0x00000002
 #define I810_PTE_VALID          0x00000001
 #define I830_PTE_SYSTEM_CACHED  0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED       0x00000002
-#define HSW_PTE_UNCACHED        0x00000000
-#define GEN6_PTE_LLC            0x00000004
-#define GEN6_PTE_LLC_MLC        0x00000006
-#define GEN6_PTE_GFDT           0x00000008

 #define I810_SMRAM_MISCC        0x70
 #define I810_GFX_MEM_WIN_SIZE   0x00010000

@@ -97,7 +91,6 @@
 #define G4x_GMCH_SIZE_VT_2M     (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)

 #define GFX_FLSH_CNTL           0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV       0x101008

 #define I810_DRAM_CTL           0x3000
 #define I810_DRAM_ROW_0         0x00000001

@@ -148,29 +141,6 @@
 #define INTEL_I7505_AGPCTRL     0x70
 #define INTEL_I7505_MCHCFG      0x50

-#define SNB_GMCH_CTRL                   0x50
-#define SNB_GMCH_GMS_STOLEN_MASK        0xF8
-#define SNB_GMCH_GMS_STOLEN_32M         (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M         (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M         (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M        (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M        (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M        (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M        (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M        (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M        (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M        (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M        (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M        (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M        (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M        (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M        (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M        (0x10 << 3)
-#define SNB_GTT_SIZE_0M                 (0 << 8)
-#define SNB_GTT_SIZE_1M                 (1 << 8)
-#define SNB_GTT_SIZE_2M                 (2 << 8)
-#define SNB_GTT_SIZE_MASK               (3 << 8)

 /* pci devices ids */
 #define PCI_DEVICE_ID_INTEL_E7221_HB    0x2588
 #define PCI_DEVICE_ID_INTEL_E7221_IG    0x258a

@@ -219,66 +189,5 @@
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB      0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB     0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG       0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB              0x0100  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG          0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG          0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG     0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB            0x0104  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG        0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG        0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG   0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB            0x0108  /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG            0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB                0x0150  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG            0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG            0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB              0x0154  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG          0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG          0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB              0x0158  /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG          0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG          0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB               0x0F00 /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG               0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB                  0x0400 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG            0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG            0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG       0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB                0x0404 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG            0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG            0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG       0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB                0x0408 /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG            0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG            0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG       0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB                0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG        0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG        0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG   0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG        0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG        0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG   0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG        0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG        0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG   0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG        0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG        0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG   0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG        0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG        0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG   0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG        0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG        0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG   0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG        0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG        0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG   0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG        0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG        0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG   0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG        0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG        0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG   0x0D3A

 #endif


@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
                         stolen_size = 0;
                         break;
                 }
-        } else if (INTEL_GTT_GEN == 6) {
-                /*
-                 * SandyBridge has new memory control reg at 0x50.w
-                 */
-                u16 snb_gmch_ctl;
-                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-                switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
-                case SNB_GMCH_GMS_STOLEN_32M:
-                        stolen_size = MB(32);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_64M:
-                        stolen_size = MB(64);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_96M:
-                        stolen_size = MB(96);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_128M:
-                        stolen_size = MB(128);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_160M:
-                        stolen_size = MB(160);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_192M:
-                        stolen_size = MB(192);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_224M:
-                        stolen_size = MB(224);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_256M:
-                        stolen_size = MB(256);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_288M:
-                        stolen_size = MB(288);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_320M:
-                        stolen_size = MB(320);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_352M:
-                        stolen_size = MB(352);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_384M:
-                        stolen_size = MB(384);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_416M:
-                        stolen_size = MB(416);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_448M:
-                        stolen_size = MB(448);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_480M:
-                        stolen_size = MB(480);
-                        break;
-                case SNB_GMCH_GMS_STOLEN_512M:
-                        stolen_size = MB(512);
-                        break;
-                }
         } else {
                 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                 case I855_GMCH_GMS_STOLEN_1M:

@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)

 static unsigned int intel_gtt_total_entries(void)
 {
-        int size;
-
         if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
                 return i965_gtt_total_entries();
-        else if (INTEL_GTT_GEN == 6) {
-                u16 snb_gmch_ctl;
-
-                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-                switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-                default:
-                case SNB_GTT_SIZE_0M:
-                        printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-                        size = MB(0);
-                        break;
-                case SNB_GTT_SIZE_1M:
-                        size = MB(1);
-                        break;
-                case SNB_GTT_SIZE_2M:
-                        size = MB(2);
-                        break;
-                }
-                return size/4;
-        } else {
+        else {
                 /* On previous hardware, the GTT size was just what was
                  * required to map the aperture.
                  */

@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
 {
         u8 __iomem *reg;

-        if (INTEL_GTT_GEN >= 6)
-                return true;
-
         if (INTEL_GTT_GEN == 2) {
                 u16 gmch_ctrl;

@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
         writel(addr | pte_flags, intel_private.gtt + entry);
 }

-static bool gen6_check_flags(unsigned int flags)
-{
-        return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
-                                unsigned int flags)
-{
-        unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-        unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-        u32 pte_flags;
-
-        if (type_mask == AGP_USER_MEMORY)
-                pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
-        else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-                pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-                if (gfdt)
-                        pte_flags |= GEN6_PTE_GFDT;
-        } else { /* set 'normal'/'cached' to LLC by default */
-                pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-                if (gfdt)
-                        pte_flags |= GEN6_PTE_GFDT;
-        }
-
-        /* gen6 has bit11-4 for physical addr bit39-32 */
-        addr |= (addr >> 28) & 0xff0;
-        writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
-                             unsigned int flags)
-{
-        unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-        unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-        u32 pte_flags;
-
-        if (type_mask == AGP_USER_MEMORY)
-                pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-        else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-                pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-                if (gfdt)
-                        pte_flags |= GEN6_PTE_GFDT;
-        } else { /* set 'normal'/'cached' to LLC by default */
-                pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-                if (gfdt)
-                        pte_flags |= GEN6_PTE_GFDT;
-        }
-
-        /* gen6 has bit11-4 for physical addr bit39-32 */
-        addr |= (addr >> 28) & 0xff0;
-        writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
-                                   unsigned int flags)
-{
-        unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-        unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-        u32 pte_flags;
-
-        if (type_mask == AGP_USER_MEMORY)
-                pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-        else {
-                pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-                if (gfdt)
-                        pte_flags |= GEN6_PTE_GFDT;
-        }
-
-        /* gen6 has bit11-4 for physical addr bit39-32 */
-        addr |= (addr >> 28) & 0xff0;
-        writel(addr | pte_flags, intel_private.gtt + entry);
-
-        writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
 /* Certain Gen5 chipsets require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */

@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
 static int i9xx_setup(void)
 {
-        u32 reg_addr;
+        u32 reg_addr, gtt_addr;
         int size = KB(512);

         pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

         reg_addr &= 0xfff80000;

-        if (INTEL_GTT_GEN >= 7)
-                size = MB(2);
-
         intel_private.registers = ioremap(reg_addr, size);
         if (!intel_private.registers)
                 return -ENOMEM;

-        if (INTEL_GTT_GEN == 3) {
-                u32 gtt_addr;
-
-                pci_read_config_dword(intel_private.pcidev,
-                                      I915_PTEADDR, &gtt_addr);
-                intel_private.gtt_bus_addr = gtt_addr;
-        } else {
-                u32 gtt_offset;
-
-                switch (INTEL_GTT_GEN) {
-                case 5:
-                case 6:
-                case 7:
-                        gtt_offset = MB(2);
-                        break;
-                case 4:
-                default:
-                        gtt_offset = KB(512);
-                        break;
-                }
-                intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+        switch (INTEL_GTT_GEN) {
+        case 3:
+                pci_read_config_dword(intel_private.pcidev,
+                                      I915_PTEADDR, &gtt_addr);
+                intel_private.gtt_bus_addr = gtt_addr;
+                break;
+        case 5:
+                intel_private.gtt_bus_addr = reg_addr + MB(2);
+                break;
+        default:
+                intel_private.gtt_bus_addr = reg_addr + KB(512);
+                break;
         }

         if (needs_idle_maps())

@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
         .check_flags = i830_check_flags,
         .chipset_flush = i9xx_chipset_flush,
 };
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
-        .gen = 6,
-        .setup = i9xx_setup,
-        .cleanup = gen6_cleanup,
-        .write_entry = gen6_write_entry,
-        .dma_mask_size = 40,
-        .check_flags = gen6_check_flags,
-        .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
-        .gen = 6,
-        .setup = i9xx_setup,
-        .cleanup = gen6_cleanup,
-        .write_entry = haswell_write_entry,
-        .dma_mask_size = 40,
-        .check_flags = gen6_check_flags,
-        .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
-        .gen = 7,
-        .setup = i9xx_setup,
-        .cleanup = gen6_cleanup,
-        .write_entry = valleyview_write_entry,
-        .dma_mask_size = 40,
-        .check_flags = gen6_check_flags,
-};

 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine

@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
             "HD Graphics", &ironlake_gtt_driver },
         { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
             "HD Graphics", &ironlake_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-            "Sandybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-            "Sandybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-            "Sandybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-            "Sandybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-            "Sandybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-            "Sandybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-            "Sandybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
-            "Ivybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
-            "Ivybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
-            "Ivybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
-            "Ivybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
-            "Ivybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
-            "Ivybridge", &sandybridge_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
-            "ValleyView", &valleyview_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-            "Haswell", &haswell_gtt_driver },
-        { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-            "Haswell", &haswell_gtt_driver },
         { 0, NULL, NULL }
 };

@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);

-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
 {
         return &intel_private.base;
 }


@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
 source "drivers/gpu/drm/cirrus/Kconfig"

 source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"


@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
                 drm_context.o drm_dma.o \
                 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
                 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
-                drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+                drm_agpsupport.o drm_scatter.o drm_pci.o \
                 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
                 drm_crtc.o drm_modes.o drm_edid.o \
                 drm_info.o drm_debugfs.o drm_encoder_slave.o \

@@ -16,10 +16,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \

 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o

 drm-usb-y := drm_usb.o

-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o

@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y			+= i2c/


@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
 static int ast_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
-                       bool no_wait_reserve, bool no_wait_gpu,
+                       bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem)
 {
         int r;
-        r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+        r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
         return r;
 }

@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
         ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
                           ttm_bo_type_device, &astbo->placement,
-                          align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+                          align >> PAGE_SHIFT, false, NULL, acc_size,
                           NULL, ast_bo_ttm_destroy);
         if (ret)
                 return ret;

@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
         ast_ttm_placement(bo, pl_flag);
         for (i = 0; i < bo->placement.num_placement; i++)
                 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
         if (ret)
                 return ret;

@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
         for (i = 0; i < bo->placement.num_placement ; i++)
                 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
         if (ret)
                 return ret;

@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
         for (i = 0; i < bo->placement.num_placement ; i++)
                 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
         if (ret) {
                 DRM_ERROR("pushing to VRAM failed\n");
                 return ret;


@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 };

-static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
 {
         struct apertures_struct *ap;
         bool primary = false;

         ap = alloc_apertures(1);
+        if (!ap)
+                return -ENOMEM;
+
         ap->ranges[0].base = pci_resource_start(pdev, 0);
         ap->ranges[0].size = pci_resource_len(pdev, 0);

@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
 #endif
         remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
         kfree(ap);
+
+        return 0;
 }

 static int __devinit
 cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-        cirrus_kick_out_firmware_fb(pdev);
+        int ret;
+
+        ret = cirrus_kick_out_firmware_fb(pdev);
+        if (ret)
+                return ret;

         return drm_get_pci_dev(pdev, ent, &driver);
 }


@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 static int cirrus_bo_move(struct ttm_buffer_object *bo,
                           bool evict, bool interruptible,
-                          bool no_wait_reserve, bool no_wait_gpu,
+                          bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem)
 {
         int r;
-        r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+        r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
         return r;
 }

@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
         ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
                           ttm_bo_type_device, &cirrusbo->placement,
-                          align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+                          align >> PAGE_SHIFT, false, NULL, acc_size,
                           NULL, cirrus_bo_ttm_destroy);
         if (ret)
                 return ret;

@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
         cirrus_ttm_placement(bo, pl_flag);
         for (i = 0; i < bo->placement.num_placement; i++)
                 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
         if (ret)
                 return ret;

@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
         for (i = 0; i < bo->placement.num_placement ; i++)
                 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
         if (ret)
                 return ret;

@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
         for (i = 0; i < bo->placement.num_placement ; i++)
                 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
         if (ret) {
                 DRM_ERROR("pushing to VRAM failed\n");
                 return ret;


@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 {
         struct drm_device *dev = crtc->dev;

-        if (crtc->gamma_store) {
-                kfree(crtc->gamma_store);
-                crtc->gamma_store = NULL;
-        }
+        kfree(crtc->gamma_store);
+        crtc->gamma_store = NULL;

         drm_mode_object_put(dev, &crtc->base);
         list_del(&crtc->head);

@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
         INIT_LIST_HEAD(&connector->probed_modes);
         INIT_LIST_HEAD(&connector->modes);
         connector->edid_blob_ptr = NULL;
+        connector->status = connector_status_unknown;

         list_add_tail(&connector->head, &dev->mode_config.connector_list);
         dev->mode_config.num_connector++;

         if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
-                drm_connector_attach_property(connector,
+                drm_object_attach_property(&connector->base,
                                               dev->mode_config.edid_property,
                                               0);

-        drm_connector_attach_property(connector,
+        drm_object_attach_property(&connector->base,
                                       dev->mode_config.dpms_property, 0);

 out:

@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
         for (i = 0; i < num_planes; i++) {
                 unsigned int width = r->width / (i != 0 ? hsub : 1);
+                unsigned int height = r->height / (i != 0 ? vsub : 1);
+                unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);

                 if (!r->handles[i]) {
                         DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
                         return -EINVAL;
                 }

-                if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+                if ((uint64_t) width * cpp > UINT_MAX)
+                        return -ERANGE;
+
+                if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+                        return -ERANGE;
+
+                if (r->pitches[i] < width * cpp) {
                         DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
                         return -EINVAL;
                 }

@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
         if (!drm_core_check_feature(dev, DRIVER_MODESET))
                 return -EINVAL;

+        if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+                DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+                return -EINVAL;
+        }
+
         if ((config->min_width > r->width) || (r->width > config->max_width)) {
                 DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
                               r->width, config->min_width, config->max_width);

@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 }
 EXPORT_SYMBOL(drm_property_destroy);

-void drm_connector_attach_property(struct drm_connector *connector,
-                                   struct drm_property *property, uint64_t init_val)
-{
-        drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
-                                     struct drm_property *property, uint64_t value)
-{
-        return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
-                                     struct drm_property *property, uint64_t *val)
-{
-        return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
 void drm_object_attach_property(struct drm_mode_object *obj,
                                 struct drm_property *property,
                                 uint64_t init_val)

@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
         /* Delete edid, when there is none. */
         if (!edid) {
                 connector->edid_blob_ptr = NULL;
-                ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+                ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
                 return ret;
         }

         size = EDID_LENGTH * (1 + edid->extensions);
         connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
                                                             size, edid);
+        if (!connector->edid_blob_ptr)
+                return -EINVAL;

-        ret = drm_connector_property_set_value(connector,
+        ret = drm_object_property_set_value(&connector->base,
                                                dev->mode_config.edid_property,
                                                connector->edid_blob_ptr->base.id);

@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
                 for (i = 0; i < property->num_values; i++)
                         valid_mask |= (1ULL << property->values[i]);
                 return !(value & ~valid_mask);
+        } else if (property->flags & DRM_MODE_PROP_BLOB) {
+                /* Only the driver knows */
+                return true;
         } else {
                 int i;
                 for (i = 0; i < property->num_values; i++)

@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
         /* store the property value if successful */
         if (!ret)
-                drm_connector_property_set_value(connector, property, value);
+                drm_object_property_set_value(&connector->base, property, value);
         return ret;
 }

@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
                 if (encoder->funcs->reset)
                         encoder->funcs->reset(encoder);

-        list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                connector->status = connector_status_unknown;
+
                 if (connector->funcs->reset)
                         connector->funcs->reset(connector);
+        }
 }
 EXPORT_SYMBOL(drm_mode_config_reset);


@ -39,6 +39,35 @@
#include <drm/drm_fb_helper.h> #include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h> #include <drm/drm_edid.h>
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
* connector list
* @dev: drm device to operate on
*
* Some userspace presumes that the first connected connector is the main
* display, where it's supposed to display e.g. the login screen. For
* laptops, this should be the main panel. Use this function to sort all
* (eDP/LVDS) panels to the front of the connector list, instead of
* painstakingly trying to initialize them in the right order.
*/
void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
{
struct drm_connector *connector, *tmp;
struct list_head panel_list;
INIT_LIST_HEAD(&panel_list);
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
list_move_tail(&connector->head, &panel_list);
}
list_splice(&panel_list, &dev->mode_config.connector_list);
}
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
static bool drm_kms_helper_poll = true; static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600); module_param_named(poll, drm_kms_helper_poll, bool, 0600);
@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/** /**
* drm_helper_probe_single_connector_modes - get complete set of display modes * drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device * @connector: connector to probe
* @maxX: max width for modes * @maxX: max width for modes
* @maxY: max height for modes * @maxY: max height for modes
* *
* LOCKING: * LOCKING:
* Caller must hold mode config lock. * Caller must hold mode config lock.
* *
* Based on @dev's mode_config layout, scan all the connectors and try to detect * Based on the helper callbacks implemented by @connector try to detect all
* modes on them. Modes will first be added to the connector's probed_modes * valid modes. Modes will first be added to the connector's probed_modes list,
* list, then culled (based on validity and the @maxX, @maxY parameters) and * then culled (based on validity and the @maxX, @maxY parameters) and put into
* put into the normal modes list. * the normal modes list.
* *
* Intended to be used either at bootup time or when major configuration * Intended to be use as a generic implementation of the ->probe() @connector
* changes have occurred. * callback for drivers that use the crtc helpers for output mode filtering and
* * detection.
* FIXME: take into account monitor limits
* *
* RETURNS: * RETURNS:
* Number of modes found on @connector. * Number of modes found on @connector.
@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->funcs->force(connector); connector->funcs->force(connector);
} else { } else {
connector->status = connector->funcs->detect(connector, true); connector->status = connector->funcs->detect(connector, true);
drm_kms_helper_poll_enable(dev);
} }
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
if (connector->status == connector_status_disconnected) { if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector)); connector->base.id, drm_get_connector_name(connector));
@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
} }
/** /**
* drm_crtc_set_mode - set a mode * drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program * @crtc: CRTC to program
* @mode: mode to use * @mode: mode to use
* @x: width of mode * @x: horizontal offset into the surface
* @y: height of mode * @y: vertical offset into the surface
* @old_fb: old framebuffer, for cleanup
* *
* LOCKING: * LOCKING:
* Caller must hold mode config lock. * Caller must hold mode config lock.
* *
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
* to fixup or reject the mode prior to trying to set it. * to fixup or reject the mode prior to trying to set it. This is an internal
* helper that drivers could e.g. use to update properties that require the
* entire output pipe to be disabled and re-enabled in a new configuration. For
* example for changing whether audio is enabled on a hdmi link or for changing
* panel fitter or dither attributes. It is also called by the
* drm_crtc_helper_set_config() helper function to drive the mode setting
* sequence.
* *
* RETURNS: * RETURNS:
* True if the mode was set successfully, or false otherwise. * True if the mode was set successfully, or false otherwise.
@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/** /**
* drm_crtc_helper_set_config - set a new config from userspace * drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup * @set: mode set configuration
* @crtc_info: user provided configuration
* @new_mode: new mode to set
* @connector_set: set of connectors for the new config
* @fb: new framebuffer
* *
* LOCKING: * LOCKING:
* Caller must hold mode config lock. * Caller must hold mode config lock.
* *
* Setup a new configuration, provided by the user in @crtc_info, and enable * Setup a new configuration, provided by the upper layers (either an ioctl call
* it. * from userspace or internally e.g. from the fbdev suppport code) in @set, and
* enable it. This is the main helper functions for drivers that implement
* kernel mode setting with the crtc helper functions and the assorted
* ->prepare(), ->modeset() and ->commit() helper callbacks.
* *
* RETURNS: * RETURNS:
* Zero. (FIXME) * Returns 0 on success, -ERRNO on failure.
*/ */
int drm_crtc_helper_set_config(struct drm_mode_set *set) int drm_crtc_helper_set_config(struct drm_mode_set *set)
{ {
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}

/**
 * drm_helper_connector_dpms() - connector dpms helper implementation
 * @connector: affected connector
 * @mode: DPMS mode
 *
 * This is the main helper function provided by the crtc helper framework for
 * implementing the DPMS connector attribute. It computes the new desired DPMS
 * state for all encoders and crtcs in the output mesh and calls the ->dpms()
 * callback provided by the driver appropriately.
 */
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
drm_sysfs_hotplug_event(dev);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
{
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

/* Ignore forced connectors. */
if (connector->force)
continue;

/* Ignore HPD capable connectors and connectors where we don't
 * want any hotplug detection at all for polling. */
if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
continue;

repoll = true;

old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
   skip it */
if (old_status == connector_status_connected &&
    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;

connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);

if (changed)
drm_kms_helper_hotplug_event(dev);

if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
return;

list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
			 DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
enum drm_connector_status old_status;
bool changed = false;
if (!dev->mode_config.poll_enabled)
return;

mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {

/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
old_status = connector->status;
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
if (old_status != connector->status)
changed = true;
}
mutex_unlock(&dev->mode_config.mutex);
if (changed)
drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
@@ -30,6 +30,15 @@
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
/**
* DOC: dp helpers
*
* These functions contain some common logic and helpers at various abstraction
* levels to deal with Display Port sink devices and related things like DP aux
* channel transfers, EDID reading over DP aux channels, decoding certain DPCD
* blocks, ...
*/
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;

ret = (*algo_data->aux_ch)(adapter, mode,
			   write_byte, read_byte);
return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
}

static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
return 0;
}
/**
* i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
* @adapter: i2c adapter to register
*
 * This registers an i2c adapter that uses the dp aux channel as its underlying
* transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
* and store it in the algo_data member of the @adapter argument. This will be
* used by the i2c over dp aux algorithm to drive the hardware.
*
* RETURNS:
* 0 on success, -ERRNO on failure.
*/
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;

error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
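
For reference, a registration sketch; my_aux_ch is a placeholder for the driver's raw AUX transfer hook, wired up through the i2c_algo_dp_aux_data structure documented above:

    static struct i2c_algo_dp_aux_data my_aux_data = {
        .running = false,
        .address = 0,
        .aux_ch  = my_aux_ch,   /* driver's raw AUX_CH transaction */
    };

    adapter->algo_data = &my_aux_data;
    ret = i2c_dp_aux_add_bus(adapter);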
/* Helpers for DP link training */
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
u8 l = dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
u8 lane_status;
int lane;
lane_align = dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
u8 lane_status;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_LANE_CR_DONE) == 0)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(100);
else
mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(400);
else
mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
case 162000:
default:
return DP_LINK_BW_1_62;
case 270000:
return DP_LINK_BW_2_7;
case 540000:
return DP_LINK_BW_5_4;
}
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
default:
return 162000;
case DP_LINK_BW_2_7:
return 270000;
case DP_LINK_BW_5_4:
return 540000;
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
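
For orientation, a sketch of how a driver's clock-recovery loop might consume these helpers; read_link_status() stands in for the driver's own DPCD read of the DP_LANE0_1_STATUS block, and lane_count/voltage are driver state:

    u8 link_status[DP_LINK_STATUS_SIZE];
    int tries;

    for (tries = 0; tries < 5; tries++) {
        drm_dp_link_train_clock_recovery_delay(dpcd);
        if (!read_link_status(dp, link_status))   /* placeholder */
            break;
        if (drm_dp_clock_recovery_ok(link_status, lane_count))
            break;   /* CR done on all active lanes */
        /* otherwise pick up the sink's requested drive settings */
        voltage = drm_dp_get_adjust_request_voltage(link_status, 0);
    }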
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
if (memchr_inv(in_edid, 0, length))
return false;

return true;
}

@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
/*
* Looks for a CEA mode matching given drm_display_mode.
* Returns its CEA Video ID code, or 0 if not found.
*/
u8 drm_match_cea_mode(struct drm_display_mode *to_match)
{
struct drm_display_mode *cea_mode;
u8 mode;
for (mode = 0; mode < drm_num_cea_modes; mode++) {
cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
if (drm_mode_equal(to_match, cea_mode))
return mode + 1;
}
return 0;
}
EXPORT_SYMBOL(drm_match_cea_mode);
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];

DRM_DEBUG_KMS("HDMI: DVI dual %d, "
    "max TMDS clock %d, "
    "latency present %d %d, "
    "video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
/**
* drm_mode_cea_vic - return the CEA-861 VIC of a given mode
* @mode: mode
*
* RETURNS:
* The VIC number, 0 in case it's not a CEA-861 mode.
*/
uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
{
uint8_t i;
for (i = 0; i < drm_num_cea_modes; i++)
if (drm_mode_equal(mode, &edid_cea_modes[i]))
return i + 1;
return 0;
}
EXPORT_SYMBOL(drm_mode_cea_vic);
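
A usage sketch for the new lookup; avi, a driver-built AVI infoframe, is a placeholder:

    u8 vic = drm_mode_cea_vic(adjusted_mode);

    if (vic)
        avi.video_code = vic;   /* CEA-861 mode: advertise its VIC */
    else
        avi.video_code = 0;     /* not a CEA mode */

Note that drm_match_cea_mode() above and drm_mode_cea_vic() here compute the same mapping; one of the two could presumably be dropped in a follow-up.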
@@ -27,6 +27,8 @@
 * Dave Airlie <airlied@linux.ie>
 * Jesse Barnes <jesse.barnes@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
/**
* DOC: fbdev helpers
*
* The fb helper functions are useful to provide an fbdev on top of a drm kernel
 * mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*/
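
The typical bring-up sequence, as a hedged sketch; my_fbdev and my_fb_helper_funcs are driver placeholders, and the helper entry points are assumed to match the API used in this file:

    struct drm_fb_helper *helper = &my_fbdev->helper;
    int ret;

    helper->funcs = &my_fb_helper_funcs;   /* supplies fb_probe etc. */

    ret = drm_fb_helper_init(dev, helper, num_crtcs, max_connectors);
    if (ret)
        return ret;

    drm_fb_helper_single_add_all_connectors(helper);
    drm_fb_helper_initial_config(helper, 32);   /* preferred bpp */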
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
if (mode->force) {
const char *s;

switch (mode->force) {
case DRM_FORCE_OFF:
s = "OFF";
break;
case DRM_FORCE_ON_DIGITAL:
s = "ON - dig";
break;
default:
case DRM_FORCE_ON:
s = "ON";
break;
}

DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
if (panic_timeout < 0)
return 0;

pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
pr_info("drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
				 &paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* if driver picks 8 or 16 by default use that
   for both depth/bpp */
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;

/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;

/* set the fb pointer */
for (i = 0; i < fb_helper->crtc_count; i++)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;

if (new_fb) {
info->var.pixclock = 0;
if (register_framebuffer(info) < 0)
return -EINVAL;

dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
	 info->node, info->fix.id);
} else {
drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
dev_info(fb_helper->dev->dev, "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
			       &paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;

if (strict)
enable = connector->status == connector_status_connected;
else
enable = connector->status != connector_status_disconnected;

return enable;
}
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];

if ((encoder->possible_crtcs & (1 << c)) == 0)
continue;

for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
	sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
		  sizeof(bool), GFP_KERNEL);
if (!crtcs || !modes || !enabled) {
DRM_ERROR("Memory allocation failed\n");
goto out;
}

drm_enable_connectors(fb_helper, enabled);
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
}
}

out:
kfree(crtcs);
kfree(modes);
kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/**
 * drm_helper_initial_config - setup a sane initial connector configuration
 * @fb_helper: fb_helper device struct
 * @bpp_sel: bpp value to use for the framebuffer configuration
 *
 * LOCKING:
 * Called at init time by the driver to set up the @fb_helper initial
 * configuration, must take the mode config lock.
 *
 * Scans the CRTCs and connectors and tries to put together an initial setup.
 * At the moment, this is a cloned configuration across all heads with
 * a new framebuffer object as the backing store.
 *
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
/*
 * we shouldn't end up with no modes here.
 */
if (count == 0)
dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");

drm_setup_crtcs(fb_helper);

return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);

/**
 * drm_fb_helper_hotplug_event - respond to a hotplug notification by
 *                               probing all the outputs attached to the fb
 * @fb_helper: the drm_fb_helper
 *
 * LOCKING:
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, list, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}

static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return list;
if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
return NULL;
}
static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each_entry_rcu(entry, list, h_list, head) {
if (entry->key == key)
return list;
if (entry->key > key)
break;
}
return NULL;
}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
hlist_for_each_entry(entry, list, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = list;
}
if (parent) {
hlist_add_after_rcu(parent, &item->head);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
return 0;
}
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
{
struct hlist_node *list;

list = drm_ht_find_key_rcu(ht, key);
if (!list)
return -EINVAL;

@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init_rcu(list);
return 0;
}
return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)

int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init_rcu(&item->head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
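
The lookup side is now safe under RCU while inserts and removes still need external serialization; a hedged usage sketch:

    struct drm_hash_item *item;
    int ret;

    rcu_read_lock();   /* protects the hlist traversal in drm_ht_find_key_rcu() */
    ret = drm_ht_find_item(&ht, key, &item);
    rcu_read_unlock();

    /* insert/remove paths must still hold the caller's lock; the _rcu
     * list primitives only make concurrent readers safe. */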
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
break;
case DRM_CAP_TIMESTAMP_MONOTONIC:
req->value = drm_timestamp_monotonic;
break;
default:
return -EINVAL;
}
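
Userspace can probe the new capability before relying on monotonic event timestamps; a minimal sketch, assuming an already-open DRM fd and headers new enough to define DRM_CAP_TIMESTAMP_MONOTONIC (added by this series):

    struct drm_get_cap cap = { .capability = DRM_CAP_TIMESTAMP_MONOTONIC };

    if (drmIoctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value)
        use_monotonic = true;   /* event tv_sec/tv_usec are CLOCK_MONOTONIC */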
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
s64 diff_ns;
int vblrc;
struct timeval tvblank;
int count = DRM_TIMESTAMP_MAXRETRIES;

/* Prevent vblank irq processing while disabling vblank irqs,
 * so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
do {
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);

if (!count)
vblrc = 0;

/* Compute time difference to stored timestamp of last vblank
 * as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
unsigned flags,
struct drm_crtc *refcrtc)
{
ktime_t stime, etime, mono_time_offset;
struct timeval tv_etime;
struct drm_display_mode *mode;
int vbl_status, vtotal, vdisplay;
int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
preempt_disable();

/* Get system timestamp before query. */
stime = ktime_get();

/* Get vertical and horizontal scanout pos. vpos, hpos. */
vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);

/* Get system timestamp after query. */
etime = ktime_get();
if (!drm_timestamp_monotonic)
mono_time_offset = ktime_get_monotonic_offset();

preempt_enable();
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EIO;
}

duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);

/* Accept result with < max_error nsecs timing uncertainty. */
if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
vbl_status |= 0x8;
}

if (!drm_timestamp_monotonic)
etime = ktime_sub(etime, mono_time_offset);

/* save this only for debugging purposes */
tv_etime = ktime_to_timeval(etime);
/* Subtract time delta from raw timestamp to get final
 * vblank_time timestamp for end of vblank.
 */
etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);

DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
	  crtc, (int)vbl_status, hpos, vpos,
	  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
	  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
	  (int)duration_ns/1000, i);
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
static struct timeval get_drm_timestamp(void)
{
ktime_t now;
now = ktime_get();
if (!drm_timestamp_monotonic)
now = ktime_sub(now, ktime_get_monotonic_offset());
return ktime_to_timeval(now);
}
/**
 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
 * vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
}

/* GPU high precision timestamp query unsupported or failed.
 * Return current monotonic/gettimeofday timestamp as best estimate.
 */
*tvblank = get_drm_timestamp();

return 0;
}
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
}
EXPORT_SYMBOL(drm_vblank_count_and_time);
static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
unsigned long seq, struct timeval *now)
{
WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
e->event.sequence = seq;
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
e->event.sequence);
}
/**
* drm_send_vblank_event - helper to send vblank event after pageflip
* @dev: DRM device
* @crtc: CRTC in question
* @e: the event to send
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
*/
void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e)
{
struct timeval now;
unsigned int seq;
if (crtc >= 0) {
seq = drm_vblank_count_and_time(dev, crtc, &now);
} else {
seq = 0;
now = get_drm_timestamp();
}
send_vblank_event(dev, e, seq, &now);
}
EXPORT_SYMBOL(drm_send_vblank_event);
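
With the helper exported, a driver's flip-completion path reduces to a few lines; a hedged sketch, where pending_event and pipe stand in for driver-private state (the helper itself asserts that event_lock is held):

    spin_lock_irqsave(&dev->event_lock, flags);
    if (pending_event) {
        drm_send_vblank_event(dev, pipe, pending_event);
        pending_event = NULL;
    }
    spin_unlock_irqrestore(&dev->event_lock, flags);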
/**
 * drm_update_vblank_count - update the master vblank counter
 * @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);

/**
 * drm_vblank_off - disable vblank events on a CRTC
 * @dev: DRM device
 * @crtc: CRTC in question
 *
 * Caller must hold event lock.
 */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)

/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);

spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
	  wanted %d, current %d\n",
	  e->event.sequence, seq);
drm_vblank_put(dev, e->pipe);
send_vblank_event(dev, e, seq, &now);
}
spin_unlock(&dev->event_lock);

spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,

e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, &now);
vblwait->reply.sequence = seq;
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
DRM_DEBUG("vblank event on %d, current %d\n",
	  e->event.sequence, seq);

list_del(&e->base.link);
drm_vblank_put(dev, e->pipe);
send_vblank_event(dev, e, seq, &now);
}

spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -46,7 +46,7 @@
 *
 * Describe @mode using DRM_DEBUG.
 */
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
	"0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
 * RETURNS:
 * @mode->hdisplay
 */
int drm_mode_width(const struct drm_display_mode *mode)
{
return mode->hdisplay;
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
 * RETURNS:
 * @mode->vdisplay
 */
int drm_mode_height(const struct drm_display_mode *mode)
{
return mode->vdisplay;
}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
 * RETURNS:
 * True if the modes are equal, false otherwise.
 */
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
 * the same */
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
u32 lnkcap = 0, lnkcap2 = 0;

*mask = 0;
if (!dev->pdev)
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);

struct idr drm_minors_idr;
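
A quick note on runtime control: since the parameter is registered with mode 0600, root should be able to flip it without reloading, via /sys/module/drm/parameters/timestamp_monotonic, or at boot with drm.timestamp_monotonic=0 to restore the old wall-clock behaviour (paths assumed from the standard module_param_named() sysfs layout).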
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
if (!file_priv->master)
return -EINVAL;

if (file_priv->minor->master)
return -EINVAL;

mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, file_priv, false);
if (unlikely(ret != 0)) {
file_priv->is_master = 0;
drm_master_put(&file_priv->minor->master);
}
}
mutex_unlock(&dev->struct_mutex);

return 0;
}
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);

list_del(&dev->driver_item);
kfree(dev->devname);
kfree(dev);
}
EXPORT_SYMBOL(drm_put_dev);
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
uint64_t dpms_status;
int ret;

ret = drm_object_property_get_value(&connector->base,
				    dev->mode_config.dpms_property,
				    &dpms_status);
if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
return 0;
}

ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;

@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
return 0;
}

ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
if (ret)
return 0;
@@ -10,6 +10,12 @@ config DRM_EXYNOS
	  Choose this option if you have a Samsung SoC EXYNOS chipset.
	  If M is selected the module will be called exynosdrm.

config DRM_EXYNOS_IOMMU
	bool "EXYNOS DRM IOMMU Support"
	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
	help
	  Choose this option if you want to use IOMMU feature for DRM.

config DRM_EXYNOS_DMABUF
	bool "EXYNOS DRM DMABUF"
	depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
	help
	  Choose this option if you want to use Exynos G2D for DRM.
config DRM_EXYNOS_IPP
bool "Exynos DRM IPP"
depends on DRM_EXYNOS
help
Choose this option if you want to use IPP feature for DRM.
config DRM_EXYNOS_FIMC
bool "Exynos DRM FIMC"
depends on DRM_EXYNOS_IPP
help
Choose this option if you want to use Exynos FIMC for DRM.
config DRM_EXYNOS_ROTATOR
bool "Exynos DRM Rotator"
depends on DRM_EXYNOS_IPP
help
Choose this option if you want to use Exynos Rotator for DRM.
config DRM_EXYNOS_GSC
bool "Exynos DRM GSC"
depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
help
Choose this option if you want to use Exynos GSC for DRM.
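
For reference, a sketch of the .config fragment that enables the whole IPP stack introduced above (GSC additionally requires ARCH_EXYNOS5, per the dependency lines):

    CONFIG_DRM_EXYNOS=y
    CONFIG_DRM_EXYNOS_IPP=y
    CONFIG_DRM_EXYNOS_FIMC=y
    CONFIG_DRM_EXYNOS_ROTATOR=y
    CONFIG_DRM_EXYNOS_GSC=y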
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
		exynos_drm_plane.o

exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
		exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o

obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
{ },
};

#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
#endif

struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(hdmiddc_match_types),
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
@@ -33,89 +33,64 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
int ret = 0;
enum dma_attr attr;
unsigned int nr_pages;

DRM_DEBUG_KMS("%s\n", __FILE__);

if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}

init_dma_attrs(&buf->dma_attrs);

/*
 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
 * region will be allocated else physically contiguous
 * as possible.
 */
if (flags & EXYNOS_BO_CONTIG)
dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

/*
 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
 * else cachable mapping.
 */
if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
attr = DMA_ATTR_WRITE_COMBINE;
else
attr = DMA_ATTR_NON_CONSISTENT;

dma_set_attr(attr, &buf->dma_attrs);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

buf->pages = dma_alloc_attrs(dev->dev, buf->size,
		&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
if (!buf->pages) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}

nr_pages = buf->size >> PAGE_SHIFT;
buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
if (!buf->sgt) {
DRM_ERROR("failed to get sg table.\n");
ret = -ENOMEM;
goto err_free_attrs;
}

DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
		(unsigned long)buf->dma_addr,
		buf->size);

return ret;

err_free_attrs:
dma_free_attrs(dev->dev, buf->size, buf->pages,
		(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;

return ret;
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);

if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
}

DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
		(unsigned long)buf->dma_addr,
		buf->size);
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;

dma_free_attrs(dev->dev, buf->size, buf->pages,
		(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
}
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer);

/* allocate physical memory region and setup sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buf,
				unsigned int flags);

/* release physical memory region, and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
				unsigned int flags,
				struct exynos_drm_gem_buf *buffer);
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}

spin_lock_irq(&dev->event_lock);
list_add_tail(&event->base.link,
		&dev_priv->pageflip_event_list);
spin_unlock_irq(&dev->event_lock);

crtc->fb = fb;
ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
				    NULL);
if (ret) {
crtc->fb = old_fb;

spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
spin_unlock_irq(&dev->event_lock);

goto out;
}
@@ -30,70 +30,108 @@
#include <linux/dma-buf.h>

struct exynos_drm_dmabuf_attachment {
struct sg_table sgt;
enum dma_data_direction dir;
};

static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
				struct device *dev,
				struct dma_buf_attachment *attach)
{
struct exynos_drm_dmabuf_attachment *exynos_attach;

exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
if (!exynos_attach)
return -ENOMEM;

exynos_attach->dir = DMA_NONE;
attach->priv = exynos_attach;

return 0;
}

static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
				struct dma_buf_attachment *attach)
{
struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct sg_table *sgt;

if (!exynos_attach)
return;

sgt = &exynos_attach->sgt;

if (exynos_attach->dir != DMA_NONE)
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
		exynos_attach->dir);

sg_free_table(sgt);
kfree(exynos_attach);
attach->priv = NULL;
}

static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
				enum dma_data_direction dir)
{
struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct scatterlist *rd, *wr;
struct sg_table *sgt = NULL;
unsigned int i;
int nents, ret;

DRM_DEBUG_PRIME("%s\n", __FILE__);

if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);

/* just return current sgt if already requested. */
if (exynos_attach->dir == dir)
return &exynos_attach->sgt;

/* reattaching is not allowed. */
if (WARN_ON(exynos_attach->dir != DMA_NONE))
return ERR_PTR(-EBUSY);

buf = gem_obj->buffer;
if (!buf) {
DRM_ERROR("buffer is null.\n");
return ERR_PTR(-ENOMEM);
}

sgt = &exynos_attach->sgt;

ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
if (ret) {
DRM_ERROR("failed to alloc sgt.\n");
return ERR_PTR(-ENOMEM);
}

mutex_lock(&dev->struct_mutex);

rd = buf->sgt->sgl;
wr = sgt->sgl;
for (i = 0; i < sgt->orig_nents; ++i) {
sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
rd = sg_next(rd);
wr = sg_next(wr);
}

nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
if (!nents) {
DRM_ERROR("failed to map sgl with iommu.\n");
sgt = ERR_PTR(-EIO);
goto err_unlock;
}

exynos_attach->dir = dir;
attach->priv = exynos_attach;

DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
/* Nothing to do. */
}
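
For context, the importer-side calls that land in these callbacks, as a sketch (dmabuf, dev and the device access itself are elided to the relevant lines):

    attach = dma_buf_attach(dmabuf, dev);                 /* -> attach callback */
    sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);  /* -> map_dma_buf */
    /* ... device uses sgt ... */
    dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE); /* no-op here */
    dma_buf_detach(dmabuf, attach);     /* -> detach: real dma_unmap_sg happens here */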
static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
}

static struct dma_buf_ops exynos_dmabuf_ops = {
.attach = exynos_gem_attach_dma_buf,
.detach = exynos_gem_detach_dma_buf,
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
.kmap = exynos_gem_dmabuf_kmap,
@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	struct scatterlist *sgl;
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct exynos_drm_gem_buf *buffer;
-	struct page *page;
 	int ret;

 	DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 		goto err_unmap_attach;
 	}

-	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-	if (!buffer->pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
 	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
 	if (!exynos_gem_obj) {
 		ret = -ENOMEM;
-		goto err_free_pages;
+		goto err_free_buffer;
 	}

 	sgl = sgt->sgl;
+	buffer->size = dma_buf->size;
+	buffer->dma_addr = sg_dma_address(sgl);

 	if (sgt->nents == 1) {
-		buffer->dma_addr = sg_dma_address(sgt->sgl);
-		buffer->size = sg_dma_len(sgt->sgl);
-
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
 	} else {
-		unsigned int i = 0;
-
-		buffer->dma_addr = sg_dma_address(sgl);
-		while (i < sgt->nents) {
-			buffer->pages[i] = sg_page(sgl);
-			buffer->size += sg_dma_len(sgl);
-			sgl = sg_next(sgl);
-			i++;
-		}
+		/*
+		 * this case could be CONTIG or NONCONTIG type but for now
+		 * sets NONCONTIG.
+		 * TODO. we have to find a way that exporter can notify
+		 * the type of its own buffer to importer.
+		 */
 		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
 	}
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 	return &exynos_gem_obj->base;

-err_free_pages:
-	kfree(buffer->pages);
-	buffer->pages = NULL;
-
 err_free_buffer:
 	kfree(buffer);
 	buffer = NULL;
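The import path treats a single-entry table as physically contiguous, since one mapped entry is by definition one dma region. A hedged sketch of that check, walking the mapped entries with for_each_sg (the helper name is illustrative):

#include <linux/scatterlist.h>

/* hypothetical helper: sum mapped bytes and report contiguity */
static bool sgt_is_contiguous(struct sg_table *sgt, size_t *total_len)
{
	struct scatterlist *sg;
	size_t len = 0;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		len += sg_dma_len(sg);

	*total_len = len;

	/* one mapped entry means one contiguous dma region */
	return sgt->nents == 1;
}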
View File
@@ -40,6 +40,8 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"

 #define DRIVER_NAME	"exynos"
 #define DRIVER_DESC	"Samsung SoC DRM"
@@ -49,6 +51,9 @@
 #define VBLANK_OFF_DELAY	50000

+/* platform device pointer for exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	INIT_LIST_HEAD(&private->pageflip_event_list);
 	dev->dev_private = (void *)private;

+	/*
+	 * create mapping to manage iommu table and set a pointer to iommu
+	 * mapping structure to iommu_mapping of private data.
+	 * also this iommu_mapping can be used to check if iommu is supported
+	 * or not.
+	 */
+	ret = drm_create_iommu_mapping(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to create iommu mapping.\n");
+		goto err_crtc;
+	}
+
 	drm_mode_config_init(dev);

 	/* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	for (nr = 0; nr < MAX_CRTC; nr++) {
 		ret = exynos_drm_crtc_create(dev, nr);
 		if (ret)
-			goto err_crtc;
+			goto err_release_iommu_mapping;
 	}
for (nr = 0; nr < MAX_PLANE; nr++) { for (nr = 0; nr < MAX_PLANE; nr++) {
@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false); plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane) if (!plane)
goto err_crtc; goto err_release_iommu_mapping;
} }
ret = drm_vblank_init(dev, MAX_CRTC); ret = drm_vblank_init(dev, MAX_CRTC);
if (ret) if (ret)
goto err_crtc; goto err_release_iommu_mapping;
/* /*
* probe sub drivers such as display controller and hdmi driver, * probe sub drivers such as display controller and hdmi driver,
@ -126,6 +143,8 @@ err_drm_device:
exynos_drm_device_unregister(dev); exynos_drm_device_unregister(dev);
err_vblank: err_vblank:
drm_vblank_cleanup(dev); drm_vblank_cleanup(dev);
err_release_iommu_mapping:
drm_release_iommu_mapping(dev);
err_crtc: err_crtc:
drm_mode_config_cleanup(dev); drm_mode_config_cleanup(dev);
kfree(private); kfree(private);
@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev); drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev); drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev); drm_mode_config_cleanup(dev);
drm_release_iommu_mapping(dev);
kfree(dev->dev_private); kfree(dev->dev_private);
dev->dev_private = NULL; dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
 			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
 			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+			exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
 };
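Userspace reaches these handlers through the matching DRM ioctls. A rough usage sketch, assuming the new uapi structures in exynos_drm.h — treat the field and constant names as illustrative:

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>

/* stop a previously started ipp operation on an opened exynos drm fd */
static int ipp_stop(int fd, unsigned int prop_id)
{
	struct drm_exynos_ipp_cmd_ctrl ctrl = {
		.prop_id = prop_id,
		.ctrl = IPP_CTRL_STOP,	/* illustrative control code */
	};

	if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl) < 0) {
		perror("DRM_IOCTL_EXYNOS_IPP_CMD_CTRL");
		return -1;
	}
	return 0;
}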
 static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
 	DRM_DEBUG_DRIVER("%s\n", __FILE__);

+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);

 	return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
 	ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
 	if (ret < 0)
 		goto out_common_hdmi;
+
+	ret = exynos_platform_device_hdmi_register();
+	if (ret < 0)
+		goto out_common_hdmi_dev;
 #endif

 #ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
 		goto out_g2d;
 #endif

+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	ret = platform_driver_register(&fimc_driver);
+	if (ret < 0)
+		goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	ret = platform_driver_register(&rotator_driver);
+	if (ret < 0)
+		goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	ret = platform_driver_register(&gsc_driver);
+	if (ret < 0)
+		goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	ret = platform_driver_register(&ipp_driver);
+	if (ret < 0)
+		goto out_ipp;
+#endif
+
 	ret = platform_driver_register(&exynos_drm_platform_driver);
 	if (ret < 0)
-		goto out;
+		goto out_drm;
+
+	exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+				NULL, 0);
+	if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+		ret = PTR_ERR(exynos_drm_pdev);
+		goto out;
+	}

 	return 0;
 out:
+	platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
 	platform_driver_unregister(&g2d_driver);
 out_g2d:
 #endif

 #ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
 	platform_driver_unregister(&vidi_driver);
+out_vidi:
 #endif

 #ifdef CONFIG_DRM_EXYNOS_HDMI
+	exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
 	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 out_common_hdmi:
 	platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
 {
 	DRM_DEBUG_DRIVER("%s\n", __FILE__);

+	platform_device_unregister(exynos_drm_pdev);
+
 	platform_driver_unregister(&exynos_drm_platform_driver);

+#ifdef CONFIG_DRM_EXYNOS_IPP
+	platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	platform_driver_unregister(&fimc_driver);
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
 	platform_driver_unregister(&g2d_driver);
 #endif

 #ifdef CONFIG_DRM_EXYNOS_HDMI
+	exynos_platform_device_hdmi_unregister();
 	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 	platform_driver_unregister(&mixer_driver);
 	platform_driver_unregister(&hdmi_driver);
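Registration failures unwind only what already succeeded, and the exit path unregisters in strict reverse order of registration. A condensed sketch of the idiom with two hypothetical drivers standing in for the long ladder above:

#include <linux/module.h>
#include <linux/platform_device.h>

/* hypothetical drivers, for illustration only */
extern struct platform_driver foo_driver, bar_driver;

static int __init example_init(void)
{
	int ret;

	ret = platform_driver_register(&foo_driver);
	if (ret < 0)
		return ret;

	ret = platform_driver_register(&bar_driver);
	if (ret < 0)
		goto out_foo;	/* undo only what already succeeded */

	return 0;

out_foo:
	platform_driver_unregister(&foo_driver);
	return ret;
}

static void __exit example_exit(void)
{
	/* strict reverse order of registration */
	platform_driver_unregister(&bar_driver);
	platform_driver_unregister(&foo_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");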
View File
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
  * @commit: apply hardware specific overlay data to registers.
  * @enable: enable hardware specific overlay.
  * @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- *	hardware overlay is disabled.
  */
 struct exynos_drm_overlay_ops {
 	void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
 	void (*commit)(struct device *subdrv_dev, int zpos);
 	void (*enable)(struct device *subdrv_dev, int zpos);
 	void (*disable)(struct device *subdrv_dev, int zpos);
-	void (*wait_for_vblank)(struct device *subdrv_dev);
 };

 /*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
  * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *	allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
  * @zpos: order of overlay layer(z position).
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
 	unsigned int pitch;
 	uint32_t pixel_format;
 	dma_addr_t dma_addr[MAX_FB_BUFFER];
-	void __iomem *vaddr[MAX_FB_BUFFER];
 	int zpos;
 	bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ *	hardware overlay is updated.
  */
 struct exynos_drm_manager_ops {
 	void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
 	void (*commit)(struct device *subdrv_dev);
 	int (*enable_vblank)(struct device *subdrv_dev);
 	void (*disable_vblank)(struct device *subdrv_dev);
+	void (*wait_for_vblank)(struct device *subdrv_dev);
 };

 /*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	gem_list;
-	unsigned int		gem_nr;
+	struct list_head	userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+	struct device	*dev;
+	struct list_head	event_list;
 };

 struct drm_exynos_file_private {
 	struct exynos_drm_g2d_private	*g2d_priv;
+	struct exynos_drm_ipp_private	*ipp_priv;
 };

 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address to device address space.
+ *	with iommu, device address space starts from this address
+ *	otherwise default one.
+ * @da_space_size: size of device address space.
+ *	if 0 then default value is used for it.
+ * @da_space_order: order to device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
 	struct drm_crtc *crtc[MAX_CRTC];
 	struct drm_property *plane_zpos_property;
 	struct drm_property *crtc_mode_property;
+
+	unsigned long da_start;
+	unsigned long da_space_size;
+	unsigned long da_space_order;
 };
 /*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);

+/*
+ * this function registers exynos drm hdmi platform device. It ensures only one
+ * instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
 extern struct platform_driver fimd_driver;
 extern struct platform_driver hdmi_driver;
 extern struct platform_driver mixer_driver;
 extern struct platform_driver exynos_drm_common_hdmi_driver;
 extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
 #endif
View File
@@ -234,6 +234,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 	exynos_encoder->dpms = DRM_MODE_DPMS_ON;
 }

+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_encoder *exynos_encoder;
+	struct exynos_drm_manager_ops *ops;
+	struct drm_device *dev = fb->dev;
+	struct drm_encoder *encoder;
+
+	/*
+	 * make sure that overlay data are updated to real hardware
+	 * for all encoders.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		exynos_encoder = to_exynos_encoder(encoder);
+		ops = exynos_encoder->manager->ops;
+
+		/*
+		 * wait for vblank interrupt
+		 * - this makes sure that overlay data are updated to
+		 *	real hardware.
+		 */
+		if (ops->wait_for_vblank)
+			ops->wait_for_vblank(exynos_encoder->manager->dev);
+	}
+}
+
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
 {
 	struct drm_plane *plane;
@@ -505,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)

 	if (overlay_ops && overlay_ops->disable)
 		overlay_ops->disable(manager->dev, zpos);
-
-	/*
-	 * wait for vblank interrupt
-	 * - this makes sure that hardware overlay is disabled to avoid
-	 * for the dma accesses to memory after gem buffer was released
-	 * because the setting for disabling the overlay will be updated
-	 * at vsync.
-	 */
-	if (overlay_ops && overlay_ops->wait_for_vblank)
-		overlay_ops->wait_for_vblank(manager->dev);
 }
View File
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
 #endif
View File
@@ -30,10 +30,13 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>

 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"

 #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };

+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+				struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	unsigned int flags;
+
+	/*
+	 * if exynos drm driver supports iommu then framebuffer can use
+	 * all the buffer types.
+	 */
+	if (is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	flags = exynos_gem_obj->flags;
+
+	/*
+	 * without iommu support, not support physically non-continuous memory
+	 * for framebuffer.
+	 */
+	if (IS_NONCONTIG_BUFFER(flags)) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)

 	DRM_DEBUG_KMS("%s\n", __FILE__);

+	/* make sure that overlay data are updated before releasing fb. */
+	exynos_drm_encoder_complete_scanout(fb);
+
 	drm_framebuffer_cleanup(fb);

 	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_gem_object *obj)
 {
 	struct exynos_drm_fb *exynos_fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	int ret;

+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+	if (ret < 0) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb) {
 		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
 		return ERR_PTR(-ENOMEM);
 	}

+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer\n");
 		return ERR_PTR(ret);
 	}

-	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
 	return &exynos_fb->fb;
 }

@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj;
-	struct drm_framebuffer *fb;
 	struct exynos_drm_fb *exynos_fb;
-	int i;
+	int i, ret;

 	DRM_DEBUG_KMS("%s\n", __FILE__);

@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		return ERR_PTR(-ENOENT);
 	}

-	fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-	if (IS_ERR(fb)) {
-		drm_gem_object_unreference_unlocked(obj);
-		return fb;
+	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+	if (!exynos_fb) {
+		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+		return ERR_PTR(-ENOMEM);
 	}

-	exynos_fb = to_exynos_fb(fb);
+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
 	exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);

 	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);

 	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+		struct exynos_drm_gem_obj *exynos_gem_obj;
+		int ret;
+
 		obj = drm_gem_object_lookup(dev, file_priv,
 				mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
-			exynos_drm_fb_destroy(fb);
+			kfree(exynos_fb);
 			return ERR_PTR(-ENOENT);
 		}

+		exynos_gem_obj = to_exynos_gem_obj(obj);
+
+		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+		if (ret < 0) {
+			DRM_ERROR("cannot use this gem memory type for fb.\n");
+			kfree(exynos_fb);
+			return ERR_PTR(ret);
+		}
+
 		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
 	}

-	return fb;
+	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+	if (ret) {
+		for (i = 0; i < exynos_fb->buf_cnt; i++) {
+			struct exynos_drm_gem_obj *gem_obj;
+
+			gem_obj = exynos_fb->exynos_gem_obj[i];
+			drm_gem_object_unreference_unlocked(&gem_obj->base);
+		}
+
+		kfree(exynos_fb);
+		return ERR_PTR(ret);
+	}
+
+	return &exynos_fb->fb;
 }

 struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
 	if (!buffer)
 		return NULL;

-	DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-			(unsigned long)buffer->kvaddr,
-			(unsigned long)buffer->dma_addr);
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);

 	return buffer;
 }
View File
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
 	struct exynos_drm_gem_obj	*exynos_gem_obj;
 };

+static int exynos_drm_fb_mmap(struct fb_info *info,
+			struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	unsigned long vm_size;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+	vm_size = vma->vm_end - vma->vm_start;
+
+	if (vm_size > buffer->size)
+		return -EINVAL;
+
+	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
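dma_mmap_attrs() maps the whole allocation into a process with the same attributes it was allocated with. An isolated sketch of the call, all names illustrative:

/* sketch: expose a dma_alloc_attrs() buffer through an mmap handler */
static int example_mmap_buffer(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, struct dma_attrs *attrs)
{
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	/* never map more than was allocated */
	if (vm_size > size)
		return -EINVAL;

	/*
	 * dma_mmap_attrs() derives the right pgprot and page offsets from
	 * the attributes used at allocation time (e.g. write-combine).
	 */
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}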
 static struct fb_ops exynos_drm_fb_ops = {
 	.owner		= THIS_MODULE,
+	.fb_mmap	= exynos_drm_fb_mmap,
 	.fb_fillrect	= cfb_fillrect,
 	.fb_copyarea	= cfb_copyarea,
 	.fb_imageblit	= cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 		return -EFAULT;
 	}

+	/* map pages with kernel virtual space. */
+	if (!buffer->kvaddr) {
+		unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+		buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+				pgprot_writecombine(PAGE_KERNEL));
+		if (!buffer->kvaddr) {
+			DRM_ERROR("failed to map pages to kernel space.\n");
+			return -EIO;
+		}
+	}
+
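vmap() stitches the scattered pages into one kernel-virtual, write-combined range so the fbdev console can draw with plain memory writes; the destroy path further down undoes it with vunmap(). A self-contained sketch:

#include <linux/vmalloc.h>
#include <linux/mm.h>

/* sketch: map an array of pages write-combined, undo with vunmap() */
static void *example_map_pages(struct page **pages, unsigned int nr_pages)
{
	/* VM_MAP marks the area as a regular vmap'ed region */
	return vmap(pages, nr_pages, VM_MAP,
		    pgprot_writecombine(PAGE_KERNEL));
}

static void example_unmap(void *vaddr)
{
	if (vaddr)
		vunmap(vaddr);
}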
 	/* buffer count to framebuffer always is 1 at booting time. */
 	exynos_drm_fb_set_buf_cnt(fb, 1);

@@ -87,8 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
 	fbi->screen_base = buffer->kvaddr + offset;
-	fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
-				offset);
+	fbi->fix.smem_start = (unsigned long)
+			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;

@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
 	if (IS_ERR(exynos_gem_obj)) {
 		ret = PTR_ERR(exynos_gem_obj);
-		goto out;
+		goto err_release_framebuffer;
 	}

 	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 	if (IS_ERR_OR_NULL(helper->fb)) {
 		DRM_ERROR("failed to create drm framebuffer.\n");
 		ret = PTR_ERR(helper->fb);
-		goto out;
+		goto err_destroy_gem;
 	}

 	helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
 	if (ret) {
 		DRM_ERROR("failed to allocate cmap.\n");
-		goto out;
+		goto err_destroy_framebuffer;
 	}

 	ret = exynos_drm_fbdev_update(helper, helper->fb);
-	if (ret < 0) {
-		fb_dealloc_cmap(&fbi->cmap);
-		goto out;
-	}
+	if (ret < 0)
+		goto err_dealloc_cmap;
+
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+err_dealloc_cmap:
+	fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+	drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+	exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+	framebuffer_release(fbi);

 	/*
 	 * if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
 				     struct drm_fb_helper *fb_helper)
 {
+	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
 	struct drm_framebuffer *fb;

+	if (exynos_gem_obj->buffer->kvaddr)
+		vunmap(exynos_gem_obj->buffer->kvaddr);
+
 	/* release drm framebuffer and real buffer */
 	if (fb_helper->fb && fb_helper->fb->funcs) {
 		fb = fb_helper->fb;
File diff suppressed because it is too large

View File
@@ -0,0 +1,37 @@
/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
*
* Authors:
* Eunchul Kim <chulspro.kim@samsung.com>
* Jinyoung Jeon <jy0.jeon@samsung.com>
* Sangmin Lee <lsmin.lee@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_FIMC_H_
#define _EXYNOS_DRM_FIMC_H_
/*
* TODO
* FIMD output interface notifier callback.
*/
#endif /* _EXYNOS_DRM_FIMC_H_ */
View File
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 #include <linux/pm_runtime.h>

 #include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"

 /*
  * FIMD is stand for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
 	unsigned int		fb_height;
 	unsigned int		bpp;
 	dma_addr_t		dma_addr;
-	void __iomem		*vaddr;
 	unsigned int		buf_offsize;
 	unsigned int		line_size;	/* bytes */
 	bool			enabled;
+	bool			resume;
 };

 struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
 	u32				vidcon1;
 	bool				suspended;
 	struct mutex			lock;
+	wait_queue_head_t		wait_vsync_queue;
+	atomic_t			wait_vsync_event;

 	struct exynos_drm_panel_info *panel;
 };

+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+	{ .compatible = "samsung,exynos4-fimd",
+	  .data = &exynos4_fimd_driver_data },
+	{ .compatible = "samsung,exynos5-fimd",
+	  .data = &exynos5_fimd_driver_data },
+	{},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 	struct platform_device *pdev)
 {
+#ifdef CONFIG_OF
+	const struct of_device_id *of_id =
+			of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+	if (of_id)
+		return (struct fimd_driver_data *)of_id->data;
+#endif
+
 	return (struct fimd_driver_data *)
 		platform_get_device_id(pdev)->driver_data;
 }
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)

 	/* setup horizontal and vertical display size. */
 	val = VIDTCON2_LINEVAL(timing->yres - 1) |
-	       VIDTCON2_HOZVAL(timing->xres - 1);
+	       VIDTCON2_HOZVAL(timing->xres - 1) |
+	       VIDTCON2_LINEVAL_E(timing->yres - 1) |
+	       VIDTCON2_HOZVAL_E(timing->xres - 1);
 	writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);

 	/* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
 	}
 }

+static void fimd_wait_for_vblank(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	if (ctx->suspended)
+		return;
+
+	atomic_set(&ctx->wait_vsync_event, 1);
+
+	/*
+	 * wait for FIMD to signal VSYNC interrupt or return after
+	 * timeout which is set to 50ms (refresh rate of 20).
+	 */
+	if (!wait_event_timeout(ctx->wait_vsync_queue,
+				!atomic_read(&ctx->wait_vsync_event),
+				DRM_HZ/20))
+		DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
 static struct exynos_drm_manager_ops fimd_manager_ops = {
 	.dpms = fimd_dpms,
 	.apply = fimd_apply,
 	.commit = fimd_commit,
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
+	.wait_for_vblank = fimd_wait_for_vblank,
 };

 static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
 	win_data->fb_width = overlay->fb_width;
 	win_data->fb_height = overlay->fb_height;
 	win_data->dma_addr = overlay->dma_addr[0] + offset;
-	win_data->vaddr = overlay->vaddr[0] + offset;
 	win_data->bpp = overlay->bpp;
 	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
 				(overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
 			win_data->offset_x, win_data->offset_y);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
 			win_data->ovl_width, win_data->ovl_height);
-	DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-			(unsigned long)win_data->dma_addr,
-			(unsigned long)win_data->vaddr);
+	DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
 	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
 			overlay->fb_width, overlay->crtc_width);
 }
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
 	struct fimd_win_data *win_data;
 	int win = zpos;
 	unsigned long val, alpha, size;
+	unsigned int last_x;
+	unsigned int last_y;

 	DRM_DEBUG_KMS("%s\n", __FILE__);

@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)

 	/* buffer size */
 	val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
-		VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+		VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+		VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+		VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
 	writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));

 	/* OSD position */
 	val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
-		VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+		VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+		VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+		VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
 	writel(val, ctx->regs + VIDOSD_A(win));

-	val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
-					win_data->ovl_width - 1) |
-		VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
-					win_data->ovl_height - 1);
+	last_x = win_data->offset_x + win_data->ovl_width;
+	if (last_x)
+		last_x--;
+	last_y = win_data->offset_y + win_data->ovl_height;
+	if (last_y)
+		last_y--;
+
+	val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+		VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
 	writel(val, ctx->regs + VIDOSD_B(win));

 	DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
-			win_data->offset_x, win_data->offset_y,
-			win_data->offset_x + win_data->ovl_width - 1,
-			win_data->offset_y + win_data->ovl_height - 1);
+			win_data->offset_x, win_data->offset_y, last_x, last_y);

 	/* hardware window 0 doesn't support alpha channel. */
 	if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)

 	win_data = &ctx->win_data[win];

+	if (ctx->suspended) {
+		/* do not resume this window*/
+		win_data->resume = false;
+		return;
+	}
+
 	/* protect windows */
 	val = readl(ctx->regs + SHADOWCON);
 	val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
 	win_data->enabled = false;
 }

-static void fimd_wait_for_vblank(struct device *dev)
-{
-	struct fimd_context *ctx = get_fimd_context(dev);
-	int ret;
-
-	ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
-					VIDCON1_VSTATUS_VSYNC), 50);
-	if (ret < 0)
-		DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
 	.mode_set = fimd_win_mode_set,
 	.commit = fimd_win_commit,
 	.disable = fimd_win_disable,
-	.wait_for_vblank = fimd_wait_for_vblank,
 };

 static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long flags;
-	bool is_checked = false;

 	spin_lock_irqsave(&drm_dev->event_lock, flags);

@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 		if (crtc != e->pipe)
 			continue;

-		is_checked = true;
-
 		do_gettimeofday(&now);
 		e->event.sequence = 0;
 		e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)

 		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
-
-	if (is_checked) {
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
-		/*
-		 * don't off vblank if vblank_disable_allowed is 1,
-		 * because vblank would be off by timer handler.
-		 */
-		if (!drm_dev->vblank_disable_allowed)
-			drm_vblank_off(drm_dev, crtc);
+		drm_vblank_put(drm_dev, crtc);
 	}

 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 	drm_handle_vblank(drm_dev, manager->pipe);
 	fimd_finish_pageflip(drm_dev, manager->pipe);

+	/* set wait vsync event to zero and wake up queue. */
+	if (atomic_read(&ctx->wait_vsync_event)) {
+		atomic_set(&ctx->wait_vsync_event, 0);
+		DRM_WAKEUP(&ctx->wait_vsync_queue);
+	}
 out:
 	return IRQ_HANDLED;
 }
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 	 */
 	drm_dev->vblank_disable_allowed = 1;

+	/* attach this sub driver to iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_attach_device(drm_dev, dev);
+
 	return 0;
 }

@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);

-	/* TODO. */
+	/* detach this sub driver from iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_detach_device(drm_dev, dev);
 }

 static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
 	return 0;
 }

+static void fimd_window_suspend(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int i;
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->resume = win_data->enabled;
+		fimd_win_disable(dev, i);
+	}
+	fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int i;
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->enabled = win_data->resume;
+		win_data->resume = false;
+	}
+}
+
 static int fimd_activate(struct fimd_context *ctx, bool enable)
 {
+	struct device *dev = ctx->subdrv.dev;
+
 	if (enable) {
 		int ret;
-		struct device *dev = ctx->subdrv.dev;

 		ret = fimd_clock(ctx, true);
 		if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
 		/* if vblank was enabled status, enable it again. */
 		if (test_and_clear_bit(0, &ctx->irq_flags))
 			fimd_enable_vblank(dev);
+
+		fimd_window_resume(dev);
 	} else {
+		fimd_window_suspend(dev);
+
 		fimd_clock(ctx, false);
 		ctx->suspended = true;
 	}
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	if (!ctx)
 		return -ENOMEM;

-	ctx->bus_clk = clk_get(dev, "fimd");
+	ctx->bus_clk = devm_clk_get(dev, "fimd");
 	if (IS_ERR(ctx->bus_clk)) {
 		dev_err(dev, "failed to get bus clock\n");
-		ret = PTR_ERR(ctx->bus_clk);
-		goto err_clk_get;
+		return PTR_ERR(ctx->bus_clk);
 	}

-	ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
 	if (IS_ERR(ctx->lcd_clk)) {
 		dev_err(dev, "failed to get lcd clock\n");
-		ret = PTR_ERR(ctx->lcd_clk);
-		goto err_bus_clk;
+		return PTR_ERR(ctx->lcd_clk);
 	}

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!ctx->regs) {
 		dev_err(dev, "failed to map registers\n");
-		ret = -ENXIO;
-		goto err_clk;
+		return -ENXIO;
 	}

 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(dev, "irq request failed.\n");
-		goto err_clk;
+		return -ENXIO;
 	}

 	ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 			0, "drm_fimd", ctx);
 	if (ret) {
 		dev_err(dev, "irq request failed.\n");
-		goto err_clk;
+		return ret;
 	}

 	ctx->vidcon0 = pdata->vidcon0;
 	ctx->vidcon1 = pdata->vidcon1;
 	ctx->default_win = pdata->default_win;
 	ctx->panel = panel;
+	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+	atomic_set(&ctx->wait_vsync_event, 0);

 	subdrv = &ctx->subdrv;

@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	exynos_drm_subdrv_register(subdrv);

 	return 0;
-
-err_clk:
-	clk_disable(ctx->lcd_clk);
-	clk_put(ctx->lcd_clk);
-
-err_bus_clk:
-	clk_disable(ctx->bus_clk);
-	clk_put(ctx->bus_clk);
-
-err_clk_get:
-	return ret;
 }

 static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
 out:
 	pm_runtime_disable(dev);

-	clk_put(ctx->lcd_clk);
-	clk_put(ctx->bus_clk);
-
 	return 0;
 }
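devm_clk_get() ties the clock reference to the device's lifetime, which is what lets the err_clk ladder and the clk_put() calls disappear above. A sketch of the managed-resource idiom:

#include <linux/clk.h>
#include <linux/platform_device.h>

/* sketch: managed resources remove manual unwind from probe() */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* released automatically when probe fails or the device unbinds */
	clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* no goto ladder needed */

	return 0;
}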
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
 		.name	= "exynos4-fb",
 		.owner	= THIS_MODULE,
 		.pm	= &fimd_pm_ops,
+		.of_match_table = of_match_ptr(fimd_driver_dt_match),
 	},
 };
View File
@@ -17,11 +17,14 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>

 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"

 #define G2D_HW_MAJOR_VER		4
 #define G2D_HW_MINOR_VER		1
@@ -92,11 +95,21 @@
 #define G2D_CMDLIST_POOL_SIZE		(G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
 #define G2D_CMDLIST_DATA_NUM		(G2D_CMDLIST_SIZE / sizeof(u32) - 2)

+#define MAX_BUF_ADDR_NR			6
+
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL			(64 * 1024 * 1024)
+
+enum {
+	BUF_TYPE_GEM = 1,
+	BUF_TYPE_USERPTR,
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
 	u32		head;
-	u32		data[G2D_CMDLIST_DATA_NUM];
+	unsigned long	data[G2D_CMDLIST_DATA_NUM];
 	u32		last;	/* last data offset */
 };

 struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
 	struct drm_exynos_g2d_event	event;
 };

-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
 	struct list_head	list;
-	unsigned int		handle;
+	dma_addr_t		dma_addr;
+	unsigned long		userptr;
+	unsigned long		size;
+	struct page		**pages;
+	unsigned int		npages;
+	struct sg_table		*sgt;
+	struct vm_area_struct	*vma;
+	atomic_t		refcount;
+	bool			in_pool;
+	bool			out_of_list;
 };

 struct g2d_cmdlist_node {
 	struct list_head	list;
 	struct g2d_cmdlist	*cmdlist;
-	unsigned int		gem_nr;
+	unsigned int		map_nr;
+	unsigned long		handles[MAX_BUF_ADDR_NR];
+	unsigned int		obj_type[MAX_BUF_ADDR_NR];
 	dma_addr_t		dma_addr;

 	struct drm_exynos_pending_g2d_event	*event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
 	struct list_head	list;
 	struct list_head	run_cmdlist;
 	struct list_head	event_list;
+	struct drm_file		*filp;
 	pid_t			pid;
 	struct completion	complete;
 	int			async;
@@ -143,23 +168,33 @@ struct g2d_data {
 	struct mutex			cmdlist_mutex;
 	dma_addr_t			cmdlist_pool;
 	void				*cmdlist_pool_virt;
+	struct dma_attrs		cmdlist_dma_attrs;

 	/* runqueue*/
 	struct g2d_runqueue_node	*runqueue_node;
 	struct list_head		runqueue;
 	struct mutex			runqueue_mutex;
 	struct kmem_cache		*runqueue_slab;
+
+	unsigned long			current_pool;
+	unsigned long			max_pool;
 };
 static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
 	struct device *dev = g2d->dev;
 	struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 	int nr;
 	int ret;

-	g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
-						&g2d->cmdlist_pool, GFP_KERNEL);
+	init_dma_attrs(&g2d->cmdlist_dma_attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+	g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+						G2D_CMDLIST_POOL_SIZE,
+						&g2d->cmdlist_pool, GFP_KERNEL,
+						&g2d->cmdlist_dma_attrs);
 	if (!g2d->cmdlist_pool_virt) {
 		dev_err(dev, "failed to allocate dma memory\n");
 		return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 	return 0;

 err:
-	dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-			g2d->cmdlist_pool);
+	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+			g2d->cmdlist_pool_virt,
+			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 	return ret;
 }

 static void g2d_fini_cmdlist(struct g2d_data *g2d)
 {
-	struct device *dev = g2d->dev;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;

 	kfree(g2d->cmdlist_node);
-	dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-			g2d->cmdlist_pool);
+	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+			g2d->cmdlist_pool_virt,
+			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 }
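dma_alloc_attrs() is the attribute-carrying variant of dma_alloc_coherent(); the same dma_attrs object must be passed back to dma_free_attrs(). A sketch of the pairing:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/* sketch: allocate a write-combined dma buffer and free it again */
static void *example_alloc_wc(struct device *dev, size_t size,
			      dma_addr_t *dma, struct dma_attrs *attrs)
{
	init_dma_attrs(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, attrs);

	/* attrs select write-combined instead of fully coherent memory */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, attrs);
}

static void example_free_wc(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma, struct dma_attrs *attrs)
{
	/* must pass the same attrs used at allocation time */
	dma_free_attrs(dev, size, vaddr, dma, attrs);
}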
 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
 		list_add_tail(&node->event->base.link, &g2d_priv->event_list);
 }

-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
-				struct drm_file *file,
-				struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+					unsigned long obj,
+					bool force)
 {
-	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr =
+					(struct g2d_cmdlist_userptr *)obj;
if (!obj)
return;
if (force)
goto out;
atomic_dec(&g2d_userptr->refcount);
if (atomic_read(&g2d_userptr->refcount) > 0)
return;
if (g2d_userptr->in_pool)
return;
out:
exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
DMA_BIDIRECTIONAL);
exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
g2d_userptr->npages,
g2d_userptr->vma);
if (!g2d_userptr->out_of_list)
list_del_init(&g2d_userptr->list);
sg_free_table(g2d_userptr->sgt);
kfree(g2d_userptr->sgt);
g2d_userptr->sgt = NULL;
kfree(g2d_userptr->pages);
g2d_userptr->pages = NULL;
kfree(g2d_userptr);
g2d_userptr = NULL;
}
dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
unsigned long userptr,
unsigned long size,
struct drm_file *filp,
unsigned long *obj)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr;
struct g2d_data *g2d;
struct page **pages;
struct sg_table *sgt;
struct vm_area_struct *vma;
unsigned long start, end;
unsigned int npages, offset;
int ret;
if (!size) {
DRM_ERROR("invalid userptr size.\n");
return ERR_PTR(-EINVAL);
}
g2d = dev_get_drvdata(g2d_priv->dev);
/* check if userptr already exists in userptr_list. */
list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
if (g2d_userptr->userptr == userptr) {
/*
* also check size because there could be same address
* and different size.
*/
if (g2d_userptr->size == size) {
atomic_inc(&g2d_userptr->refcount);
*obj = (unsigned long)g2d_userptr;
return &g2d_userptr->dma_addr;
}
/*
* at this moment, maybe g2d dma is accessing this
* g2d_userptr memory region so just remove this
* g2d_userptr object from userptr_list not to be
* referred again and also except it the userptr
* pool to be released after the dma access completion.
*/
g2d_userptr->out_of_list = true;
g2d_userptr->in_pool = false;
list_del_init(&g2d_userptr->list);
break;
}
}
g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
if (!g2d_userptr) {
DRM_ERROR("failed to allocate g2d_userptr.\n");
return ERR_PTR(-ENOMEM);
}
atomic_set(&g2d_userptr->refcount, 1);
start = userptr & PAGE_MASK;
offset = userptr & ~PAGE_MASK;
end = PAGE_ALIGN(userptr + size);
npages = (end - start) >> PAGE_SHIFT;
g2d_userptr->npages = npages;
pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
if (!pages) {
DRM_ERROR("failed to allocate pages.\n");
kfree(g2d_userptr);
return ERR_PTR(-ENOMEM);
}
vma = find_vma(current->mm, userptr);
if (!vma) {
DRM_ERROR("failed to get vm region.\n");
ret = -EFAULT;
goto err_free_pages;
}
if (vma->vm_end < userptr + size) {
DRM_ERROR("vma is too small.\n");
ret = -EFAULT;
goto err_free_pages;
}
g2d_userptr->vma = exynos_gem_get_vma(vma);
if (!g2d_userptr->vma) {
DRM_ERROR("failed to copy vma.\n");
ret = -ENOMEM;
goto err_free_pages;
}
g2d_userptr->size = size;
ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
npages, pages, vma);
if (ret < 0) {
DRM_ERROR("failed to get user pages from userptr.\n");
goto err_put_vma;
}
g2d_userptr->pages = pages;
sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
if (!sgt) {
DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
goto err_free_userptr;
}
ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
size, GFP_KERNEL);
if (ret < 0) {
DRM_ERROR("failed to get sgt from pages.\n");
goto err_free_sgt;
}
g2d_userptr->sgt = sgt;
ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
DMA_BIDIRECTIONAL);
if (ret < 0) {
DRM_ERROR("failed to map sgt with dma region.\n");
goto err_free_sgt;
}
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
g2d_userptr->userptr = userptr;
list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
g2d->current_pool += npages << PAGE_SHIFT;
g2d_userptr->in_pool = true;
}
*obj = (unsigned long)g2d_userptr;
return &g2d_userptr->dma_addr;
err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
sgt = NULL;
err_free_userptr:
exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
g2d_userptr->npages,
g2d_userptr->vma);
err_put_vma:
exynos_gem_put_vma(g2d_userptr->vma);
err_free_pages:
kfree(pages);
kfree(g2d_userptr);
pages = NULL;
g2d_userptr = NULL;
return ERR_PTR(ret);
}
static void g2d_userptr_free_all(struct drm_device *drm_dev,
struct g2d_data *g2d,
struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr, *n;
list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
if (g2d_userptr->in_pool)
g2d_userptr_put_dma_addr(drm_dev,
(unsigned long)g2d_userptr,
true);
g2d->current_pool = 0;
}
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
-				struct drm_file *file,
-				struct g2d_cmdlist_node *node)
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+				struct g2d_cmdlist_node *node,
+				struct drm_device *drm_dev,
+				struct drm_file *file)
 {
-	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
 	struct g2d_cmdlist *cmdlist = node->cmdlist;
+	dma_addr_t *addr;
 	int offset;
 	int i;

-	for (i = 0; i < node->gem_nr; i++) {
-		struct g2d_gem_node *gem_node;
-		dma_addr_t *addr;
-
-		gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
-		if (!gem_node) {
-			dev_err(g2d_priv->dev, "failed to allocate gem node\n");
-			return -ENOMEM;
-		}
+	for (i = 0; i < node->map_nr; i++) {
+		unsigned long handle;

 		offset = cmdlist->last - (i * 2 + 1);
-		gem_node->handle = cmdlist->data[offset];
-
-		addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
-						   file);
-		if (IS_ERR(addr)) {
-			node->gem_nr = i;
-			kfree(gem_node);
-			return PTR_ERR(addr);
-		}
+		handle = cmdlist->data[offset];
+
+		if (node->obj_type[i] == BUF_TYPE_GEM) {
+			addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+								file);
+			if (IS_ERR(addr)) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+		} else {
+			struct drm_exynos_g2d_userptr g2d_userptr;
+
+			if (copy_from_user(&g2d_userptr, (void __user *)handle,
+				sizeof(struct drm_exynos_g2d_userptr))) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+
+			addr = g2d_userptr_get_dma_addr(drm_dev,
+							g2d_userptr.userptr,
+							g2d_userptr.size,
+							file,
+							&handle);
+			if (IS_ERR(addr)) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+		}

 		cmdlist->data[offset] = *addr;
-		list_add_tail(&gem_node->list, &g2d_priv->gem_list);
-		g2d_priv->gem_nr++;
+		node->handles[i] = handle;
 	}

 	return 0;
 }

-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
-				struct drm_file *file,
-				unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+				struct g2d_cmdlist_node *node,
+				struct drm_file *filp)
 {
-	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-	struct g2d_gem_node *node, *n;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+	int i;

-	list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
-		if (!nr)
-			break;
-
-		exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
-		list_del_init(&node->list);
-		kfree(node);
-		nr--;
+	for (i = 0; i < node->map_nr; i++) {
+		unsigned long handle = node->handles[i];
+
+		if (node->obj_type[i] == BUF_TYPE_GEM)
+			exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+							filp);
+		else
+			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+							false);
+
+		node->handles[i] = 0;
 	}
+
+	node->map_nr = 0;
 }
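[Editor's note: for orientation, a hypothetical userspace fragment follows, showing how one cmd_buf row can carry a GEM handle while another carries a pointer to a userptr descriptor. The uapi struct layouts, the G2D_BUF_USERPTR value, and the register offsets are assumptions inferred from this hunk, not quoted from the patch.]

#include <stdint.h>

#define G2D_BUF_USERPTR	(1u << 31)	/* assumed: bit 31 of the offset word */

struct drm_exynos_g2d_userptr {		/* assumed uapi layout */
	unsigned long userptr;
	unsigned long size;
};

struct drm_exynos_g2d_cmd {		/* assumed uapi layout */
	uint32_t offset;
	uint32_t data;
};

static void fill_buf_cmds(struct drm_exynos_g2d_cmd *cmd_buf,
			  uint32_t gem_handle,
			  struct drm_exynos_g2d_userptr *desc)
{
	/* GEM-backed buffer: plain register offset, data is the handle. */
	cmd_buf[0].offset = 0x0304;	/* hypothetical SRC base address reg */
	cmd_buf[0].data = gem_handle;

	/* malloc()-backed buffer: flag the offset word and pass a pointer
	 * to the descriptor; the kernel copy_from_user()s it above. */
	cmd_buf[1].offset = 0x0404 | G2D_BUF_USERPTR; /* hypothetical DST reg */
	cmd_buf[1].data = (uint32_t)(uintptr_t)desc;
}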
 static void g2d_dma_start(struct g2d_data *g2d,

@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
 static void g2d_free_runqueue_node(struct g2d_data *g2d,
 					struct g2d_runqueue_node *runqueue_node)
 {
+	struct g2d_cmdlist_node *node;
+
 	if (!runqueue_node)
 		return;

 	mutex_lock(&g2d->cmdlist_mutex);
+	/*
+	 * commands in run_cmdlist have been completed so unmap all gem
+	 * objects in each command node so that they are unreferenced.
+	 */
+	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
+
 	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
 	mutex_unlock(&g2d->cmdlist_mutex);
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+				struct g2d_cmdlist_node *node,
 				int nr, bool for_addr)
 {
+	struct g2d_cmdlist *cmdlist = node->cmdlist;
 	int reg_offset;
 	int index;
 	int i;

 	for (i = 0; i < nr; i++) {
 		index = cmdlist->last - 2 * (i + 1);
+
+		if (for_addr) {
+			/* check userptr buffer type. */
+			reg_offset = (cmdlist->data[index] &
+					~0x7fffffff) >> 31;
+			if (reg_offset) {
+				node->obj_type[i] = BUF_TYPE_USERPTR;
+				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+			}
+		}
+
 		reg_offset = cmdlist->data[index] & ~0xfffff000;
 		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)

@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
 		case G2D_MSK_BASE_ADDR:
 			if (!for_addr)
 				goto err;
+
+			if (node->obj_type[i] != BUF_TYPE_USERPTR)
+				node->obj_type[i] = BUF_TYPE_GEM;
 			break;
 		default:
 			if (for_addr)

@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
 	return 0;

 err:
-	dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+	dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
 	return -EINVAL;
 }
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	}

 	/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
-	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
 	if (size > G2D_CMDLIST_DATA_NUM) {
 		dev_err(dev, "cmdlist size is too big\n");
 		ret = -EINVAL;

@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	}

 	cmdlist->last += req->cmd_nr * 2;

-	ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+	ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
 	if (ret < 0)
 		goto err_free_event;

-	node->gem_nr = req->cmd_gem_nr;
-	if (req->cmd_gem_nr) {
-		struct drm_exynos_g2d_cmd *cmd_gem;
+	node->map_nr = req->cmd_buf_nr;
+	if (req->cmd_buf_nr) {
+		struct drm_exynos_g2d_cmd *cmd_buf;

-		cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+		cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;

 		if (copy_from_user(cmdlist->data + cmdlist->last,
-				(void __user *)cmd_gem,
-				sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+				(void __user *)cmd_buf,
+				sizeof(*cmd_buf) * req->cmd_buf_nr)) {
 			ret = -EFAULT;
 			goto err_free_event;
 		}
-		cmdlist->last += req->cmd_gem_nr * 2;
+		cmdlist->last += req->cmd_buf_nr * 2;

-		ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+		ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
 		if (ret < 0)
 			goto err_free_event;

-		ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+		ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
 		if (ret < 0)
 			goto err_unmap;
 	}

@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	return 0;

 err_unmap:
-	g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+	g2d_unmap_cmdlist_gem(g2d, node, file);
 err_free_event:
 	if (node->event) {
 		spin_lock_irqsave(&drm_dev->event_lock, flags);

@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 	mutex_lock(&g2d->runqueue_mutex);
 	runqueue_node->pid = current->pid;
+	runqueue_node->filp = file;
 	list_add_tail(&runqueue_node->list, &g2d->runqueue);
 	if (!g2d->runqueue_node)
 		g2d_exec_runqueue(g2d);

@@ -696,6 +996,43 @@ out:
 }
 EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
struct g2d_data *g2d;
int ret;
g2d = dev_get_drvdata(dev);
if (!g2d)
return -EFAULT;
/* allocate dma-aware cmdlist buffer. */
ret = g2d_init_cmdlist(g2d);
if (ret < 0) {
dev_err(dev, "cmdlist init failed\n");
return ret;
}
if (!is_drm_iommu_supported(drm_dev))
return 0;
ret = drm_iommu_attach_device(drm_dev, dev);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
}
return ret;
}
static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
if (!is_drm_iommu_supported(drm_dev))
return;
drm_iommu_detach_device(drm_dev, dev);
}
 static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 			struct drm_file *file)
 {

@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 	INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
 	INIT_LIST_HEAD(&g2d_priv->event_list);
-	INIT_LIST_HEAD(&g2d_priv->gem_list);
+	INIT_LIST_HEAD(&g2d_priv->userptr_list);

 	return 0;
 }

@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
 		return;

 	mutex_lock(&g2d->cmdlist_mutex);
-	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+		/*
+		 * unmap all gem objects not completed.
+		 *
+		 * P.S. if the current process was terminated forcibly then
+		 * there may be some commands in inuse_cmdlist so unmap
+		 * them.
+		 */
+		g2d_unmap_cmdlist_gem(g2d, node, file);
 		list_move_tail(&node->list, &g2d->free_cmdlist);
+	}
 	mutex_unlock(&g2d->cmdlist_mutex);

-	g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+	/* release all g2d_userptr in pool. */
+	g2d_userptr_free_all(drm_dev, g2d, file);

 	kfree(file_priv->g2d_priv);
 }

@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	mutex_init(&g2d->cmdlist_mutex);
 	mutex_init(&g2d->runqueue_mutex);

-	ret = g2d_init_cmdlist(g2d);
-	if (ret < 0)
-		goto err_destroy_workqueue;
-
-	g2d->gate_clk = clk_get(dev, "fimg2d");
+	g2d->gate_clk = devm_clk_get(dev, "fimg2d");
 	if (IS_ERR(g2d->gate_clk)) {
 		dev_err(dev, "failed to get gate clock\n");
 		ret = PTR_ERR(g2d->gate_clk);
-		goto err_fini_cmdlist;
+		goto err_destroy_workqueue;
 	}

 	pm_runtime_enable(dev);

@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 		goto err_put_clk;
 	}

+	g2d->max_pool = MAX_POOL;
+
 	platform_set_drvdata(pdev, g2d);
 	subdrv = &g2d->subdrv;
 	subdrv->dev = dev;
+	subdrv->probe = g2d_subdrv_probe;
+	subdrv->remove = g2d_subdrv_remove;
 	subdrv->open = g2d_open;
 	subdrv->close = g2d_close;

@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 err_put_clk:
 	pm_runtime_disable(dev);
-	clk_put(g2d->gate_clk);
-
-err_fini_cmdlist:
-	g2d_fini_cmdlist(g2d);

 err_destroy_workqueue:
 	destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:

@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
 	}

 	pm_runtime_disable(&pdev->dev);
-	clk_put(g2d->gate_clk);

 	g2d_fini_cmdlist(g2d);
 	destroy_workqueue(g2d->g2d_workq);


@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
 {
-	if (!IS_NONCONTIG_BUFFER(flags)) {
-		if (size >= SZ_1M)
-			return roundup(size, SECTION_SIZE);
-		else if (size >= SZ_64K)
-			return roundup(size, SZ_64K);
-		else
-			goto out;
-	}
-
-out:
+	/* TODO */
+
 	return roundup(size, PAGE_SIZE);
 }
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
-						gfp_t gfpmask)
-{
-	struct page *p, **pages;
-	int i, npages;
-
-	npages = obj->size >> PAGE_SHIFT;
-	pages = drm_malloc_ab(npages, sizeof(struct page *));
-	if (pages == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < npages; i++) {
-		p = alloc_page(gfpmask);
-		if (IS_ERR(p))
-			goto fail;
-		pages[i] = p;
-	}
-
-	return pages;
-
-fail:
-	while (--i)
-		__free_page(pages[i]);
-
-	drm_free_large(pages);
-	return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
-					struct page **pages)
-{
-	int npages;
-
-	npages = obj->size >> PAGE_SHIFT;
-
-	while (--npages >= 0)
-		__free_page(pages[npages]);
-
-	drm_free_large(pages);
-}
-
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
-					struct vm_area_struct *vma,
-					unsigned long f_vaddr,
-					pgoff_t page_offset)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	unsigned long pfn;
-
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		if (!buf->pages)
-			return -EINTR;
-
-		pfn = page_to_pfn(buf->pages[page_offset++]);
-	} else
-		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
-	return vm_insert_mixed(vma, f_vaddr, pfn);
-}
-
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	struct scatterlist *sgl;
-	struct page **pages;
-	unsigned int npages, i = 0;
-	int ret;
-
-	if (buf->pages) {
-		DRM_DEBUG_KMS("already allocated.\n");
-		return -EINVAL;
-	}
-
-	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
-	if (IS_ERR(pages)) {
-		DRM_ERROR("failed to get pages.\n");
-		return PTR_ERR(pages);
-	}
-
-	npages = obj->size >> PAGE_SHIFT;
-	buf->page_size = PAGE_SIZE;
-
-	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!buf->sgt) {
-		DRM_ERROR("failed to allocate sg table.\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to initialize sg table.\n");
-		ret = -EFAULT;
-		goto err1;
-	}
-
-	sgl = buf->sgt->sgl;
-
-	/* set all pages to sg list. */
-	while (i < npages) {
-		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
-		sg_dma_address(sgl) = page_to_phys(pages[i]);
-		i++;
-		sgl = sg_next(sgl);
-	}
-
-	/* add some codes for UNCACHED type here. TODO */
-
-	buf->pages = pages;
-	return ret;
-err1:
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-err:
-	exynos_gem_put_pages(obj, pages);
-	return ret;
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
-	/*
-	 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
-	 * allocated at gem fault handler.
-	 */
-	sg_free_table(buf->sgt);
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-
-	exynos_gem_put_pages(obj, buf->pages);
-	buf->pages = NULL;
-
-	/* add some codes for UNCACHED type here. TODO */
-}
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
+					struct vm_area_struct *vma,
+					unsigned long f_vaddr,
+					pgoff_t page_offset)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+	struct scatterlist *sgl;
+	unsigned long pfn;
+	int i;
+
+	if (!buf->sgt)
+		return -EINTR;
+
+	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
+		return -EINVAL;
+	}
+
+	sgl = buf->sgt->sgl;
+	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+		if (page_offset < (sgl->length >> PAGE_SHIFT))
+			break;
+		page_offset -= (sgl->length >> PAGE_SHIFT);
+	}
+
+	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+
+	return vm_insert_mixed(vma, f_vaddr, pfn);
+}
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,

@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

-	if (!buf->pages)
-		return;
-
 	/*
 	 * do not release memory region from exporter.
 	 *

@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	if (obj->import_attach)
 		goto out;

-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
-		exynos_drm_gem_put_pages(obj);
-	else
-		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

 out:
 	exynos_drm_fini_buf(obj->dev, buf);

@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 	/* set memory type and cache attribute from user side. */
 	exynos_gem_obj->flags = flags;

-	/*
-	 * allocate all pages as desired size if user wants to allocate
-	 * physically non-continuous memory.
-	 */
-	if (flags & EXYNOS_BO_NONCONTIG) {
-		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
-		if (ret < 0) {
-			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err_fini_buf;
-		}
-	} else {
-		ret = exynos_drm_alloc_buf(dev, buf, flags);
-		if (ret < 0) {
-			drm_gem_object_release(&exynos_gem_obj->base);
-			goto err_fini_buf;
-		}
+	ret = exynos_drm_alloc_buf(dev, buf, flags);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		goto err_fini_buf;
 	}

 	return exynos_gem_obj;

@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }

-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv)
+					struct drm_file *filp)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;

-	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+	obj = drm_gem_object_lookup(dev, filp, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return ERR_PTR(-EINVAL);

@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 	exynos_gem_obj = to_exynos_gem_obj(obj);

-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
-		drm_gem_object_unreference_unlocked(obj);
-
-		/* TODO */
-		return ERR_PTR(-EINVAL);
-	}
-
 	return &exynos_gem_obj->buffer->dma_addr;
 }

 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv)
+					struct drm_file *filp)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;

-	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+	obj = drm_gem_object_lookup(dev, filp, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return;

@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 	exynos_gem_obj = to_exynos_gem_obj(obj);

-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
-		drm_gem_object_unreference_unlocked(obj);
-
-		/* TODO */
-		return;
-	}
-
 	drm_gem_object_unreference_unlocked(obj);

 	/*

@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 					&args->offset);
 }
static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
struct file *filp)
{
struct drm_file *file_priv;
mutex_lock(&drm_dev->struct_mutex);
/* find current process's drm_file from filelist. */
list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
if (file_priv->filp == filp) {
mutex_unlock(&drm_dev->struct_mutex);
return file_priv;
}
}
mutex_unlock(&drm_dev->struct_mutex);
WARN_ON(1);
return ERR_PTR(-EFAULT);
}
 static int exynos_drm_gem_mmap_buffer(struct file *filp,
 				      struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = filp->private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct drm_device *drm_dev = obj->dev;
 	struct exynos_drm_gem_buf *buffer;
-	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+	struct drm_file *file_priv;
+	unsigned long vm_size;
 	int ret;

 	DRM_DEBUG_KMS("%s\n", __FILE__);

 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = obj;
+	vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+	/* restore it to driver's fops. */
+	filp->f_op = fops_get(drm_dev->driver->fops);
+
+	file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+	if (IS_ERR(file_priv))
+		return PTR_ERR(file_priv);
+
+	/* restore it to drm_file. */
+	filp->private_data = file_priv;

 	update_vm_cache_attr(exynos_gem_obj, vma);

-	vm_size = usize = vma->vm_end - vma->vm_start;
+	vm_size = vma->vm_end - vma->vm_start;

 	/*
 	 * a buffer contains information to physically continuous memory

@@ -516,41 +400,24 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 	if (vm_size > buffer->size)
 		return -EINVAL;

-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		int i = 0;
-
-		if (!buffer->pages)
-			return -EINVAL;
-
-		vma->vm_flags |= VM_MIXEDMAP;
-
-		do {
-			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
-			if (ret) {
-				DRM_ERROR("failed to remap user space.\n");
-				return ret;
-			}
-
-			uaddr += PAGE_SIZE;
-			usize -= PAGE_SIZE;
-		} while (usize > 0);
-	} else {
-		/*
-		 * get page frame number to physical memory to be mapped
-		 * to user space.
-		 */
-		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
-								PAGE_SHIFT;
-
-		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
-		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
-				    vma->vm_page_prot)) {
-			DRM_ERROR("failed to remap pfn range.\n");
-			return -EAGAIN;
-		}
+	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+				buffer->dma_addr, buffer->size,
+				&buffer->dma_attrs);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
 	}

+	/*
+	 * take a reference to this mapping of the object. And this reference
+	 * is unreferenced by the corresponding vm_close call.
+	 */
+	drm_gem_object_reference(obj);
+
+	mutex_lock(&drm_dev->struct_mutex);
+	drm_vm_open_locked(drm_dev, vma);
+	mutex_unlock(&drm_dev->struct_mutex);
+
 	return 0;
 }

@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}

-	obj->filp->f_op = &exynos_drm_gem_fops;
-	obj->filp->private_data = obj;
+	/*
+	 * Set the specific mmapper's fops. It will be restored by
+	 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
+	 * This is used to call the specific mapper temporarily.
+	 */
+	file_priv->filp->f_op = &exynos_drm_gem_fops;

-	addr = vm_mmap(obj->filp, 0, args->size,
+	/*
+	 * Set the gem object to private_data so that the specific mmapper
+	 * can get the gem object. It will be restored by
+	 * exynos_drm_gem_mmap_buffer to drm_file.
+	 */
+	file_priv->filp->private_data = obj;
+
+	addr = vm_mmap(file_priv->filp, 0, args->size,
 			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

 	drm_gem_object_unreference_unlocked(obj);

-	if (IS_ERR((void *)addr))
+	if (IS_ERR((void *)addr)) {
+		file_priv->filp->private_data = file_priv;
 		return PTR_ERR((void *)addr);
+	}

 	args->mapped = addr;

@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *vma_copy;
vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
if (!vma_copy)
return NULL;
if (vma->vm_ops && vma->vm_ops->open)
vma->vm_ops->open(vma);
if (vma->vm_file)
get_file(vma->vm_file);
memcpy(vma_copy, vma, sizeof(*vma));
vma_copy->vm_mm = NULL;
vma_copy->vm_next = NULL;
vma_copy->vm_prev = NULL;
return vma_copy;
}
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
if (!vma)
return;
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
fput(vma->vm_file);
kfree(vma);
}
int exynos_gem_get_pages_from_userptr(unsigned long start,
unsigned int npages,
struct page **pages,
struct vm_area_struct *vma)
{
int get_npages;
/* the memory region mmaped with VM_PFNMAP. */
if (vma_is_io(vma)) {
unsigned int i;
for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
unsigned long pfn;
int ret = follow_pfn(vma, start, &pfn);
if (ret)
return ret;
pages[i] = pfn_to_page(pfn);
}
if (i != npages) {
DRM_ERROR("failed to get user_pages.\n");
return -EINVAL;
}
return 0;
}
get_npages = get_user_pages(current, current->mm, start,
npages, 1, 1, pages, NULL);
get_npages = max(get_npages, 0);
if (get_npages != npages) {
DRM_ERROR("failed to get user_pages.\n");
while (get_npages)
put_page(pages[--get_npages]);
return -EFAULT;
}
return 0;
}
void exynos_gem_put_pages_to_userptr(struct page **pages,
unsigned int npages,
struct vm_area_struct *vma)
{
if (!vma_is_io(vma)) {
unsigned int i;
for (i = 0; i < npages; i++) {
set_page_dirty_lock(pages[i]);
/*
* undo the reference we took when populating
* the table.
*/
put_page(pages[i]);
}
}
}
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir)
{
int nents;
mutex_lock(&drm_dev->struct_mutex);
nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
if (!nents) {
DRM_ERROR("failed to map sgl with dma.\n");
mutex_unlock(&drm_dev->struct_mutex);
return nents;
}
mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir)
{
dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
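[Editor's note: read together, the four helpers above form a pin-and-map pipeline. The condensed sketch below mirrors the g2d_userptr_get_dma_addr() flow earlier in this diff; locking and error handling are omitted, so treat it as pseudocode for the call order, not a drop-in function.]

static dma_addr_t userptr_pin_sketch(struct drm_device *drm_dev,
				     unsigned long userptr, unsigned long size)
{
	unsigned long start = userptr & PAGE_MASK;
	unsigned int npages = (PAGE_ALIGN(userptr + size) - start) >> PAGE_SHIFT;
	struct page **pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	struct vm_area_struct *vma_copy;
	struct sg_table *sgt;

	/* 1. copy the vma so vm_file/vm_ops stay pinned across the dma job. */
	vma_copy = exynos_gem_get_vma(find_vma(current->mm, userptr));

	/* 2. take references on the backing pages (or follow pfns for IO vmas). */
	exynos_gem_get_pages_from_userptr(start, npages, pages, vma_copy);

	/* 3. wrap the pages into an sg table and map it for the device. */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	sg_alloc_table_from_pages(sgt, pages, npages, userptr & ~PAGE_MASK,
				  size, GFP_KERNEL);
	exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);

	/* 4. the device-visible address comes from the first sg entry. */
	return sgt->sgl[0].dma_address;
}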
int exynos_drm_gem_init_object(struct drm_gem_object *obj) int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{ {
DRM_DEBUG_KMS("%s\n", __FILE__); DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	mutex_lock(&dev->struct_mutex);

-	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
 	if (ret < 0)
-		DRM_ERROR("failed to map pages.\n");
+		DRM_ERROR("failed to map a buffer with user.\n");

 	mutex_unlock(&dev->struct_mutex);


@@ -35,21 +35,27 @@
  * exynos drm gem buffer structure.
  *
  * @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
  * @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
  * @size: size of allocated memory region.
+ * @pfnmap: indicate whether memory region from userptr is mmaped with
+ *	VM_PFNMAP or not.
  */
 struct exynos_drm_gem_buf {
 	void __iomem		*kvaddr;
+	unsigned long		userptr;
 	dma_addr_t		dma_addr;
-	struct sg_table		*sgt;
+	struct dma_attrs	dma_attrs;
+	unsigned int		write;
 	struct page		**pages;
-	unsigned long		page_size;
+	struct sg_table		*sgt;
 	unsigned long		size;
+	bool			pfnmap;
 };
/* /*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
  *	or at framebuffer creation.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
+ * @vma: a pointer to vm_area.
  * @flags: indicate memory type to allocated buffer and cache attribute.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so

@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
 	struct drm_gem_object		base;
 	struct exynos_drm_gem_buf	*buffer;
 	unsigned long			size;
+	struct vm_area_struct		*vma;
 	unsigned int			flags;
 };

@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
  * other drivers such as 2d/3d acceleration drivers.
  * with this function call, gem object reference count would be increased.
  */
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv);
+					struct drm_file *filp);

 /*
  * put dma address from gem handle and this function could be used for

@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
  */
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv);
+					struct drm_file *filp);

 /* get buffer offset to map to user space. */
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,

@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);

+/* map user space allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);

@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
static inline int vma_is_io(struct vm_area_struct *vma)
{
return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
/* get a copy of a virtual memory region. */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
/* release a userspace virtual memory area. */
void exynos_gem_put_vma(struct vm_area_struct *vma);
/* get pages from user space. */
int exynos_gem_get_pages_from_userptr(unsigned long start,
unsigned int npages,
struct page **pages,
struct vm_area_struct *vma);
/* drop the reference to pages. */
void exynos_gem_put_pages_to_userptr(struct page **pages,
unsigned int npages,
struct vm_area_struct *vma);
/* map sgt with dma region. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir);
/* unmap sgt from dma region. */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
struct sg_table *sgt,
enum dma_data_direction dir);
#endif

File diff suppressed because it is too large


@@ -0,0 +1,38 @@
/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
*
* Authors:
* Eunchul Kim <chulspro.kim@samsung.com>
* Jinyoung Jeon <jy0.jeon@samsung.com>
* Sangmin Lee <lsmin.lee@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_GSC_H_
#define _EXYNOS_DRM_GSC_H_
/*
* TODO
* FIMD output interface notifier callback.
* Mixer output interface notifier callback.
*/
#endif /* _EXYNOS_DRM_GSC_H_ */


@@ -29,6 +29,9 @@
 #define get_ctx_from_subdrv(subdrv)	container_of(subdrv,\
 				struct drm_hdmi_context, subdrv);

+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
 /* Common hdmi subdrv needs to access the hdmi and mixer through context.
  * These should be initialized by the respective drivers */
 static struct exynos_drm_hdmi_context *hdmi_ctx;

@@ -46,6 +49,25 @@ struct drm_hdmi_context {
 	bool	enabled[MIXER_WIN_NR];
 };

+int exynos_platform_device_hdmi_register(void)
+{
+	if (exynos_drm_hdmi_pdev)
+		return -EEXIST;
+
+	exynos_drm_hdmi_pdev = platform_device_register_simple(
+			"exynos-drm-hdmi", -1, NULL, 0);
+	if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
+		return PTR_ERR(exynos_drm_hdmi_pdev);
+
+	return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+	if (exynos_drm_hdmi_pdev)
+		platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
 void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
 {
 	if (ctx)

@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
 		return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
 }

+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (mixer_ops && mixer_ops->wait_for_vblank)
+		mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
 				struct drm_connector *connector,
 				const struct drm_display_mode *mode,

@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
 	.apply = drm_hdmi_apply,
 	.enable_vblank = drm_hdmi_enable_vblank,
 	.disable_vblank = drm_hdmi_disable_vblank,
+	.wait_for_vblank = drm_hdmi_wait_for_vblank,
 	.mode_fixup = drm_hdmi_mode_fixup,
 	.mode_set = drm_hdmi_mode_set,
 	.get_max_resol = drm_hdmi_get_max_resol,

@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
 	ctx->enabled[win] = false;
 }

-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
-	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
-	if (mixer_ops && mixer_ops->wait_for_vblank)
-		mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
 	.mode_set = drm_mixer_mode_set,
 	.commit = drm_mixer_commit,
 	.disable = drm_mixer_disable,
-	.wait_for_vblank = drm_mixer_wait_for_vblank,
 };

 static struct exynos_drm_manager hdmi_manager = {

@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
 	ctx->hdmi_ctx->drm_dev = drm_dev;
 	ctx->mixer_ctx->drm_dev = drm_dev;

+	if (mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
 	return 0;
 }

+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct drm_hdmi_context *ctx;
+	struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+	ctx = get_ctx_from_subdrv(subdrv);
+
+	if (mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
 static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;

@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 	subdrv->dev = dev;
 	subdrv->manager = &hdmi_manager;
 	subdrv->probe = hdmi_subdrv_probe;
+	subdrv->remove = hdmi_subdrv_remove;

 	platform_set_drvdata(pdev, subdrv);
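[Editor's note: the register/unregister pair added above needs a caller, which this excerpt does not include. A plausible wiring in the top-level driver's module init would look like the sketch below; exynos_drm_platform_driver is an assumed name.]

static int __init exynos_drm_init_sketch(void)
{
	int ret;

	/* create the common "exynos-drm-hdmi" device before the drm core
	 * probes, so the hdmi/mixer subdrvs have a parent to bind to. */
	ret = exynos_platform_device_hdmi_register();
	if (ret < 0)
		return ret;

	ret = platform_driver_register(&exynos_drm_platform_driver);
	if (ret < 0)
		exynos_platform_device_hdmi_unregister();

	return ret;
}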


@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {

 struct exynos_mixer_ops {
 	/* manager */
+	int (*iommu_on)(void *ctx, bool enable);
 	int (*enable_vblank)(void *ctx, int pipe);
 	void (*disable_vblank)(void *ctx);
+	void (*wait_for_vblank)(void *ctx);
 	void (*dpms)(void *ctx, int mode);

 	/* overlay */
-	void (*wait_for_vblank)(void *ctx);
 	void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
 	void (*win_commit)(void *ctx, int zpos);
 	void (*win_disable)(void *ctx, int zpos);


@@ -0,0 +1,150 @@
/* exynos_drm_iommu.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* Author: Inki Dae <inki.dae@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drmP.h>
#include <drm/exynos_drm.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <asm/dma-iommu.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
/*
* drm_create_iommu_mapping - create a mapping structure
*
* @drm_dev: DRM device
*/
int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
struct dma_iommu_mapping *mapping = NULL;
struct exynos_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
if (!priv->da_start)
priv->da_start = EXYNOS_DEV_ADDR_START;
if (!priv->da_space_size)
priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
if (!priv->da_space_order)
priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
priv->da_space_size,
priv->da_space_order);
if (IS_ERR(mapping))
return PTR_ERR(mapping);
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
dma_set_max_seg_size(dev, 0xffffffffu);
dev->archdata.mapping = mapping;
return 0;
}
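[Editor's note: as a usage sketch (the load() hook itself is not part of this file), a driver would populate the da_* fields, or leave them zero to get the defaults above, before calling drm_create_iommu_mapping().]

static int sketch_load(struct drm_device *drm_dev)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;

	/* zero means "use the EXYNOS_DEV_ADDR_START/SIZE/ORDER defaults". */
	priv->da_start = 0;
	priv->da_space_size = 0;
	priv->da_space_order = 0;

	return drm_create_iommu_mapping(drm_dev);
}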
/*
* drm_release_iommu_mapping - release iommu mapping structure
*
* @drm_dev: DRM device
*
* if mapping->kref becomes 0 then all things related to iommu mapping
* will be released
*/
void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
struct device *dev = drm_dev->dev;
arm_iommu_release_mapping(dev->archdata.mapping);
}
/*
* drm_iommu_attach_device- attach device to iommu mapping
*
* @drm_dev: DRM device
 * @subdrv_dev: device to be attached
*
* This function should be called by sub drivers to attach it to iommu
* mapping.
*/
int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
struct device *dev = drm_dev->dev;
int ret;
if (!dev->archdata.mapping) {
DRM_ERROR("iommu_mapping is null.\n");
return -EFAULT;
}
subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
sizeof(*subdrv_dev->dma_parms),
GFP_KERNEL);
dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
if (ret < 0) {
DRM_DEBUG_KMS("failed iommu attach.\n");
return ret;
}
/*
* Set dma_ops to drm_device just one time.
*
* The dma mapping api needs device object and the api is used
 * to allocate physical memory and map it with iommu table.
* If iommu attach succeeded, the sub driver would have dma_ops
* for iommu and also all sub drivers have same dma_ops.
*/
if (!dev->archdata.dma_ops)
dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
return 0;
}
/*
* drm_iommu_detach_device -detach device address space mapping from device
*
* @drm_dev: DRM device
* @subdrv_dev: device to be detached
*
* This function should be called by sub drivers to detach it from iommu
* mapping
*/
void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
struct device *dev = drm_dev->dev;
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
if (!mapping || !mapping->domain)
return;
iommu_detach_device(mapping->domain, subdrv_dev);
drm_release_iommu_mapping(drm_dev);
}


@@ -0,0 +1,85 @@
/* exynos_drm_iommu.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_IOMMU_H_
#define _EXYNOS_DRM_IOMMU_H_
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
#define EXYNOS_DEV_ADDR_ORDER 0x4
#ifdef CONFIG_DRM_EXYNOS_IOMMU
int drm_create_iommu_mapping(struct drm_device *drm_dev);
void drm_release_iommu_mapping(struct drm_device *drm_dev);
int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev);
void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev);
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct device *dev = drm_dev->dev;
return dev->archdata.mapping ? true : false;
#else
return false;
#endif
}
#else
struct dma_iommu_mapping;
static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
return 0;
}
static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
}
static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
return 0;
}
static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
}
static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
{
return false;
}
#endif
#endif

File diff suppressed because it is too large


@@ -0,0 +1,266 @@
/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
*
* Authors:
* Eunchul Kim <chulspro.kim@samsung.com>
* Jinyoung Jeon <jy0.jeon@samsung.com>
* Sangmin Lee <lsmin.lee@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_IPP_H_
#define _EXYNOS_DRM_IPP_H_
#define for_each_ipp_ops(pos) \
for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
#define for_each_ipp_planar(pos) \
for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
/* definition of state */
enum drm_exynos_ipp_state {
IPP_STATE_IDLE,
IPP_STATE_START,
IPP_STATE_STOP,
};
/*
* A structure of command work information.
* @work: work structure.
* @ippdrv: current work ippdrv.
* @c_node: command node information.
* @ctrl: command control.
*/
struct drm_exynos_ipp_cmd_work {
struct work_struct work;
struct exynos_drm_ippdrv *ippdrv;
struct drm_exynos_ipp_cmd_node *c_node;
enum drm_exynos_ipp_ctrl ctrl;
};
/*
* A structure of command node.
*
 * @priv: IPP private information.
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
* @cmd_lock: lock for synchronization of access to ioctl.
* @mem_lock: lock for synchronization of access to memory nodes.
* @event_lock: lock for synchronization of access to scheduled event.
* @start_complete: completion of start of command.
* @stop_complete: completion of stop of command.
* @property: property information.
* @start_work: start command work structure.
* @stop_work: stop command work structure.
* @event_work: event work structure.
* @state: state of command node.
*/
struct drm_exynos_ipp_cmd_node {
struct exynos_drm_ipp_private *priv;
struct list_head list;
struct list_head event_list;
struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
struct mutex cmd_lock;
struct mutex mem_lock;
struct mutex event_lock;
struct completion start_complete;
struct completion stop_complete;
struct drm_exynos_ipp_property property;
struct drm_exynos_ipp_cmd_work *start_work;
struct drm_exynos_ipp_cmd_work *stop_work;
struct drm_exynos_ipp_event_work *event_work;
enum drm_exynos_ipp_state state;
};
/*
* A structure of buffer information.
*
 * @handles: Y, Cb, Cr each gem object handle.
 * @base: Y, Cb, Cr each planar address.
*/
struct drm_exynos_ipp_buf_info {
unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
};
/*
 * A structure of wb setting information.
*
* @enable: enable flag for wb.
* @refresh: HZ of the refresh rate.
*/
struct drm_exynos_ipp_set_wb {
__u32 enable;
__u32 refresh;
};
/*
* A structure of event work information.
*
* @work: work structure.
* @ippdrv: current work ippdrv.
* @buf_id: id of src, dst buffer.
*/
struct drm_exynos_ipp_event_work {
struct work_struct work;
struct exynos_drm_ippdrv *ippdrv;
u32 buf_id[EXYNOS_DRM_OPS_MAX];
};
/*
* A structure of source,destination operations.
*
* @set_fmt: set format of image.
* @set_transf: set transform(rotations, flip).
* @set_size: set size of region.
* @set_addr: set address for dma.
*/
struct exynos_drm_ipp_ops {
int (*set_fmt)(struct device *dev, u32 fmt);
int (*set_transf)(struct device *dev,
enum drm_exynos_degree degree,
enum drm_exynos_flip flip, bool *swap);
int (*set_size)(struct device *dev, int swap,
struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
int (*set_addr)(struct device *dev,
struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
enum drm_exynos_ipp_buf_type buf_type);
};
/*
* A structure of ipp driver.
*
 * @drv_list: list head for registered sub driver information.
* @parent_dev: parent device information.
* @dev: platform device.
* @drm_dev: drm device.
* @ipp_id: id of ipp driver.
* @dedicated: dedicated ipp device.
* @ops: source, destination operations.
* @event_workq: event work queue.
* @cmd: current command information.
* @cmd_list: list head for command information.
* @prop_list: property informations of current ipp driver.
* @check_property: check property about format, size, buffer.
* @reset: reset ipp block.
* @start: ipp each device start.
* @stop: ipp each device stop.
* @sched_event: work schedule handler.
*/
struct exynos_drm_ippdrv {
struct list_head drv_list;
struct device *parent_dev;
struct device *dev;
struct drm_device *drm_dev;
u32 ipp_id;
bool dedicated;
struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
struct workqueue_struct *event_workq;
struct drm_exynos_ipp_cmd_node *cmd;
struct list_head cmd_list;
struct drm_exynos_ipp_prop_list *prop_list;
int (*check_property)(struct device *dev,
struct drm_exynos_ipp_property *property);
int (*reset)(struct device *dev);
int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
void (*sched_event)(struct work_struct *work);
};
#ifdef CONFIG_DRM_EXYNOS_IPP
extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
struct drm_file *file);
extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
struct drm_file *file);
extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
struct drm_file *file);
extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
struct drm_file *file);
extern int exynos_drm_ippnb_register(struct notifier_block *nb);
extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
extern void ipp_sched_cmd(struct work_struct *work);
extern void ipp_sched_event(struct work_struct *work);
#else
static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
return -ENODEV;
}
static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
return -ENODEV;
}
static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
void *data,
struct drm_file *file_priv)
{
return -ENOTTY;
}
static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
void *data,
struct drm_file *file_priv)
{
return -ENOTTY;
}
static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
void *data,
struct drm_file *file)
{
return -ENOTTY;
}
static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
void *data,
struct drm_file *file)
{
return -ENOTTY;
}
static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
{
return -ENODEV;
}
static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
return -ENODEV;
}
static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
return -ENOTTY;
}
#endif
#endif /* _EXYNOS_DRM_IPP_H_ */
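[Editor's note: to show how the callbacks in struct exynos_drm_ippdrv hang together, here is a skeletal registration. Everything named sketch_* is hypothetical and the bodies are stubs, not code from this series.]

static int sketch_check_property(struct device *dev,
				 struct drm_exynos_ipp_property *property)
{
	return 0;	/* a real driver validates format, size and buffers */
}

static int sketch_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	return 0;	/* kick the hardware for the queued command */
}

static void sketch_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	/* quiesce the hardware */
}

static struct exynos_drm_ippdrv sketch_ippdrv = {
	.check_property	= sketch_check_property,
	.start		= sketch_start,
	.stop		= sketch_stop,
	/* .ops[EXYNOS_DRM_OPS_SRC/DST] would point at exynos_drm_ipp_ops */
};

static int sketch_probe(struct platform_device *pdev)
{
	sketch_ippdrv.dev = &pdev->dev;
	return exynos_drm_ippdrv_register(&sketch_ippdrv);
}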


@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
  *		CRTC ----------------
  *		^ start		^ end
  *
- * There are six cases from a to b.
+ * There are six cases from a to f.
  *
  *		<----- SCREEN ----->
  *		0		last

@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
 		}

 		overlay->dma_addr[i] = buffer->dma_addr;
-		overlay->vaddr[i] = buffer->kvaddr;

-		DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
-				i, (unsigned long)overlay->vaddr[i],
-				(unsigned long)overlay->dma_addr[i]);
+		DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+				i, (unsigned long)overlay->dma_addr[i]);
 	}

 	actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);

@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (crtc_x < 0) {
 		if (actual_w)
 			src_x -= crtc_x;
-		else
-			src_x += crtc_w;
 		crtc_x = 0;
 	}

 	if (crtc_y < 0) {
 		if (actual_h)
 			src_y -= crtc_y;
-		else
-			src_y += crtc_h;
 		crtc_y = 0;
 	}


@@ -0,0 +1,855 @@
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors:
* YoungJun Cho <yj44.cho@samsung.com>
* Eunchul Kim <chulspro.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm.h"
#include "exynos_drm_ipp.h"
/*
 * Rotator supports image crop/rotate and input/output DMA operations.
 * input DMA reads image data from the memory.
 * output DMA writes image data to memory.
 *
 * M2M operation : supports crop/scale/rotation/csc and so on.
 * Memory ----> Rotator H/W ----> Memory.
 */
/*
 * TODO
 * 1. check suspend/resume api if needed.
 * 2. need to check use case platform_device_id.
 * 3. check src/dst size width, height.
* 4. need to add supported list in prop_list.
*/
#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
struct rot_context, ippdrv);
#define rot_read(offset) readl(rot->regs + (offset))
#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
enum rot_irq_status {
ROT_IRQ_STATUS_COMPLETE = 8,
ROT_IRQ_STATUS_ILLEGAL = 9,
};
/*
* A structure of limitation.
*
* @min_w: minimum width.
* @min_h: minimum height.
* @max_w: maximum width.
* @max_h: maximum height.
* @align: align size.
*/
struct rot_limit {
u32 min_w;
u32 min_h;
u32 max_w;
u32 max_h;
u32 align;
};
/*
* A structure of limitation table.
*
* @ycbcr420_2p: case of YUV.
* @rgb888: case of RGB.
*/
struct rot_limit_table {
struct rot_limit ycbcr420_2p;
struct rot_limit rgb888;
};
/*
* A structure of rotator context.
* @ippdrv: prepare initialization using ippdrv.
* @regs_res: register resources.
* @regs: memory mapped io registers.
* @clock: rotator gate clock.
* @limit_tbl: limitation of rotator.
* @irq: irq number.
* @cur_buf_id: current operation buffer id.
* @suspended: suspended state.
*/
struct rot_context {
struct exynos_drm_ippdrv ippdrv;
struct resource *regs_res;
void __iomem *regs;
struct clk *clock;
struct rot_limit_table *limit_tbl;
int irq;
int cur_buf_id[EXYNOS_DRM_OPS_MAX];
bool suspended;
};
static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
{
u32 val = rot_read(ROT_CONFIG);
	if (enable)
val |= ROT_CONFIG_IRQ;
else
val &= ~ROT_CONFIG_IRQ;
rot_write(val, ROT_CONFIG);
}
static u32 rotator_reg_get_fmt(struct rot_context *rot)
{
u32 val = rot_read(ROT_CONTROL);
val &= ROT_CONTROL_FMT_MASK;
return val;
}
static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
{
u32 val = rot_read(ROT_STATUS);
val = ROT_STATUS_IRQ(val);
if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
return ROT_IRQ_STATUS_COMPLETE;
return ROT_IRQ_STATUS_ILLEGAL;
}
static irqreturn_t rotator_irq_handler(int irq, void *arg)
{
struct rot_context *rot = arg;
struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
enum rot_irq_status irq_status;
u32 val;
/* Get execution result */
irq_status = rotator_reg_get_irq_status(rot);
/* clear status */
val = rot_read(ROT_STATUS);
val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
rot_write(val, ROT_STATUS);
if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
event_work->ippdrv = ippdrv;
event_work->buf_id[EXYNOS_DRM_OPS_DST] =
rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
queue_work(ippdrv->event_workq,
(struct work_struct *)event_work);
} else
DRM_ERROR("the SFR is set illegally\n");
return IRQ_HANDLED;
}
static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
u32 *vsize)
{
struct rot_limit_table *limit_tbl = rot->limit_tbl;
struct rot_limit *limit;
u32 mask, val;
/* Get size limit */
if (fmt == ROT_CONTROL_FMT_RGB888)
limit = &limit_tbl->rgb888;
else
limit = &limit_tbl->ycbcr420_2p;
/* Get mask for rounding to nearest aligned val */
mask = ~((1 << limit->align) - 1);
/* Set aligned width */
val = ROT_ALIGN(*hsize, limit->align, mask);
if (val < limit->min_w)
*hsize = ROT_MIN(limit->min_w, mask);
else if (val > limit->max_w)
*hsize = ROT_MAX(limit->max_w, mask);
else
*hsize = val;
/* Set aligned height */
val = ROT_ALIGN(*vsize, limit->align, mask);
if (val < limit->min_h)
*vsize = ROT_MIN(limit->min_h, mask);
else if (val > limit->max_h)
*vsize = ROT_MAX(limit->max_h, mask);
else
*vsize = val;
}
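/*
 * Editor's worked example (values hypothetical, and assuming ROT_ALIGN()
 * rounds to the nearest multiple selected by the mask): for RGB888 the
 * align field is 2, so mask = ~((1 << 2) - 1) = 0xfffffffc and sizes are
 * handled in multiples of 4. A requested hsize of 1023 aligns to 1024;
 * a result below min_w (8) is clamped up with ROT_MIN(8, mask), and one
 * above max_w (SZ_8K) is clamped down with ROT_MAX(SZ_8K, mask).
 */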
static int rotator_src_set_fmt(struct device *dev, u32 fmt)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 val;
val = rot_read(ROT_CONTROL);
val &= ~ROT_CONTROL_FMT_MASK;
switch (fmt) {
case DRM_FORMAT_NV12:
val |= ROT_CONTROL_FMT_YCBCR420_2P;
break;
case DRM_FORMAT_XRGB8888:
val |= ROT_CONTROL_FMT_RGB888;
break;
default:
DRM_ERROR("invalid image format\n");
return -EINVAL;
}
rot_write(val, ROT_CONTROL);
return 0;
}
static inline bool rotator_check_reg_fmt(u32 fmt)
{
if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
(fmt == ROT_CONTROL_FMT_RGB888))
return true;
return false;
}
static int rotator_src_set_size(struct device *dev, int swap,
struct drm_exynos_pos *pos,
struct drm_exynos_sz *sz)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 fmt, hsize, vsize;
u32 val;
/* Get format */
fmt = rotator_reg_get_fmt(rot);
if (!rotator_check_reg_fmt(fmt)) {
DRM_ERROR("%s:invalid format.\n", __func__);
return -EINVAL;
}
/* Align buffer size */
hsize = sz->hsize;
vsize = sz->vsize;
rotator_align_size(rot, fmt, &hsize, &vsize);
/* Set buffer size configuration */
val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
rot_write(val, ROT_SRC_BUF_SIZE);
/* Set crop image position configuration */
val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
rot_write(val, ROT_SRC_CROP_POS);
val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
rot_write(val, ROT_SRC_CROP_SIZE);
return 0;
}
static int rotator_src_set_addr(struct device *dev,
struct drm_exynos_ipp_buf_info *buf_info,
u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
{
struct rot_context *rot = dev_get_drvdata(dev);
dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
u32 val, fmt, hsize, vsize;
int i;
/* Set current buf_id */
rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
switch (buf_type) {
case IPP_BUF_ENQUEUE:
/* Set address configuration */
for_each_ipp_planar(i)
addr[i] = buf_info->base[i];
/* Get format */
fmt = rotator_reg_get_fmt(rot);
if (!rotator_check_reg_fmt(fmt)) {
DRM_ERROR("%s:invalid format.\n", __func__);
return -EINVAL;
}
/* Re-set cb planar for NV12 format */
if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
!addr[EXYNOS_DRM_PLANAR_CB]) {
val = rot_read(ROT_SRC_BUF_SIZE);
hsize = ROT_GET_BUF_SIZE_W(val);
vsize = ROT_GET_BUF_SIZE_H(val);
/* Set cb planar */
addr[EXYNOS_DRM_PLANAR_CB] =
addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
}
for_each_ipp_planar(i)
rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
break;
case IPP_BUF_DEQUEUE:
for_each_ipp_planar(i)
rot_write(0x0, ROT_SRC_BUF_ADDR(i));
break;
default:
/* Nothing to do */
break;
}
return 0;
}
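/*
 * Editor's note: for NV12 the interleaved CbCr plane immediately follows
 * the Y plane, which is why the Cb planar address above is derived as
 * Y base + (buffer width * buffer height) when userspace supplies only a
 * single base address.
 */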
static int rotator_dst_set_transf(struct device *dev,
enum drm_exynos_degree degree,
enum drm_exynos_flip flip, bool *swap)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 val;
/* Set transform configuration */
val = rot_read(ROT_CONTROL);
val &= ~ROT_CONTROL_FLIP_MASK;
switch (flip) {
case EXYNOS_DRM_FLIP_VERTICAL:
val |= ROT_CONTROL_FLIP_VERTICAL;
break;
case EXYNOS_DRM_FLIP_HORIZONTAL:
val |= ROT_CONTROL_FLIP_HORIZONTAL;
break;
default:
/* Flip None */
break;
}
val &= ~ROT_CONTROL_ROT_MASK;
switch (degree) {
case EXYNOS_DRM_DEGREE_90:
val |= ROT_CONTROL_ROT_90;
break;
case EXYNOS_DRM_DEGREE_180:
val |= ROT_CONTROL_ROT_180;
break;
case EXYNOS_DRM_DEGREE_270:
val |= ROT_CONTROL_ROT_270;
break;
default:
/* Rotation 0 Degree */
break;
}
rot_write(val, ROT_CONTROL);
/* Check degree for setting buffer size swap */
if ((degree == EXYNOS_DRM_DEGREE_90) ||
(degree == EXYNOS_DRM_DEGREE_270))
*swap = true;
else
*swap = false;
return 0;
}
static int rotator_dst_set_size(struct device *dev, int swap,
struct drm_exynos_pos *pos,
struct drm_exynos_sz *sz)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 val, fmt, hsize, vsize;
/* Get format */
fmt = rotator_reg_get_fmt(rot);
if (!rotator_check_reg_fmt(fmt)) {
DRM_ERROR("%s:invalid format.\n", __func__);
return -EINVAL;
}
/* Align buffer size */
hsize = sz->hsize;
vsize = sz->vsize;
rotator_align_size(rot, fmt, &hsize, &vsize);
/* Set buffer size configuration */
val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
rot_write(val, ROT_DST_BUF_SIZE);
/* Set crop image position configuration */
val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
rot_write(val, ROT_DST_CROP_POS);
return 0;
}
static int rotator_dst_set_addr(struct device *dev,
struct drm_exynos_ipp_buf_info *buf_info,
u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
{
struct rot_context *rot = dev_get_drvdata(dev);
dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
u32 val, fmt, hsize, vsize;
int i;
/* Set current buf_id */
rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
switch (buf_type) {
case IPP_BUF_ENQUEUE:
/* Set address configuration */
for_each_ipp_planar(i)
addr[i] = buf_info->base[i];
/* Get format */
fmt = rotator_reg_get_fmt(rot);
if (!rotator_check_reg_fmt(fmt)) {
DRM_ERROR("%s:invalid format.\n", __func__);
return -EINVAL;
}
/* Re-set cb planar for NV12 format */
if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
!addr[EXYNOS_DRM_PLANAR_CB]) {
/* Get buf size */
val = rot_read(ROT_DST_BUF_SIZE);
hsize = ROT_GET_BUF_SIZE_W(val);
vsize = ROT_GET_BUF_SIZE_H(val);
/* Set cb planar */
addr[EXYNOS_DRM_PLANAR_CB] =
addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
}
for_each_ipp_planar(i)
rot_write(addr[i], ROT_DST_BUF_ADDR(i));
break;
case IPP_BUF_DEQUEUE:
for_each_ipp_planar(i)
rot_write(0x0, ROT_DST_BUF_ADDR(i));
break;
default:
/* Nothing to do */
break;
}
return 0;
}
static struct exynos_drm_ipp_ops rot_src_ops = {
.set_fmt = rotator_src_set_fmt,
.set_size = rotator_src_set_size,
.set_addr = rotator_src_set_addr,
};
static struct exynos_drm_ipp_ops rot_dst_ops = {
.set_transf = rotator_dst_set_transf,
.set_size = rotator_dst_set_size,
.set_addr = rotator_dst_set_addr,
};
static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
struct drm_exynos_ipp_prop_list *prop_list;
DRM_DEBUG_KMS("%s\n", __func__);
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
if (!prop_list) {
DRM_ERROR("failed to alloc property list.\n");
return -ENOMEM;
}
prop_list->version = 1;
prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
(1 << EXYNOS_DRM_DEGREE_90) |
(1 << EXYNOS_DRM_DEGREE_180) |
(1 << EXYNOS_DRM_DEGREE_270);
prop_list->csc = 0;
prop_list->crop = 0;
prop_list->scale = 0;
ippdrv->prop_list = prop_list;
return 0;
}
static inline bool rotator_check_drm_fmt(u32 fmt)
{
switch (fmt) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_NV12:
return true;
default:
DRM_DEBUG_KMS("%s:not support format\n", __func__);
return false;
}
}
static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
{
switch (flip) {
case EXYNOS_DRM_FLIP_NONE:
case EXYNOS_DRM_FLIP_VERTICAL:
case EXYNOS_DRM_FLIP_HORIZONTAL:
return true;
default:
DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
return false;
}
}
static int rotator_ippdrv_check_property(struct device *dev,
struct drm_exynos_ipp_property *property)
{
struct drm_exynos_ipp_config *src_config =
&property->config[EXYNOS_DRM_OPS_SRC];
struct drm_exynos_ipp_config *dst_config =
&property->config[EXYNOS_DRM_OPS_DST];
struct drm_exynos_pos *src_pos = &src_config->pos;
struct drm_exynos_pos *dst_pos = &dst_config->pos;
struct drm_exynos_sz *src_sz = &src_config->sz;
struct drm_exynos_sz *dst_sz = &dst_config->sz;
bool swap = false;
/* Check format configuration */
if (src_config->fmt != dst_config->fmt) {
DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
return -EINVAL;
}
if (!rotator_check_drm_fmt(dst_config->fmt)) {
DRM_DEBUG_KMS("%s:invalid format\n", __func__);
return -EINVAL;
}
/* Check transform configuration */
if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
DRM_DEBUG_KMS("%s:not support source-side rotation\n",
__func__);
return -EINVAL;
}
switch (dst_config->degree) {
case EXYNOS_DRM_DEGREE_90:
case EXYNOS_DRM_DEGREE_270:
swap = true;
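		/* fall through */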
case EXYNOS_DRM_DEGREE_0:
case EXYNOS_DRM_DEGREE_180:
/* No problem */
break;
default:
DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
return -EINVAL;
}
if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
return -EINVAL;
}
if (!rotator_check_drm_flip(dst_config->flip)) {
DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
return -EINVAL;
}
/* Check size configuration */
if ((src_pos->x + src_pos->w > src_sz->hsize) ||
(src_pos->y + src_pos->h > src_sz->vsize)) {
DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
return -EINVAL;
}
if (swap) {
if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
(dst_pos->y + dst_pos->w > dst_sz->hsize)) {
DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
__func__);
return -EINVAL;
}
if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
DRM_DEBUG_KMS("%s:not support scale feature\n",
__func__);
return -EINVAL;
}
} else {
if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
(dst_pos->y + dst_pos->h > dst_sz->vsize)) {
DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
__func__);
return -EINVAL;
}
if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
DRM_DEBUG_KMS("%s:not support scale feature\n",
__func__);
return -EINVAL;
}
}
return 0;
}
static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 val;
if (rot->suspended) {
DRM_ERROR("suspended state\n");
return -EPERM;
}
if (cmd != IPP_CMD_M2M) {
DRM_ERROR("not support cmd: %d\n", cmd);
return -EINVAL;
}
/* Set interrupt enable */
rotator_reg_set_irq(rot, true);
val = rot_read(ROT_CONTROL);
val |= ROT_CONTROL_START;
rot_write(val, ROT_CONTROL);
return 0;
}
static int __devinit rotator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rot_context *rot;
struct exynos_drm_ippdrv *ippdrv;
int ret;
rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
if (!rot) {
dev_err(dev, "failed to allocate rot\n");
return -ENOMEM;
}
rot->limit_tbl = (struct rot_limit_table *)
platform_get_device_id(pdev)->driver_data;
rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!rot->regs_res) {
dev_err(dev, "failed to find registers\n");
ret = -ENOENT;
goto err_get_resource;
}
rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
if (!rot->regs) {
dev_err(dev, "failed to map register\n");
ret = -ENXIO;
goto err_get_resource;
}
rot->irq = platform_get_irq(pdev, 0);
if (rot->irq < 0) {
dev_err(dev, "failed to get irq\n");
ret = rot->irq;
goto err_get_irq;
}
ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
IRQF_ONESHOT, "drm_rotator", rot);
if (ret < 0) {
dev_err(dev, "failed to request irq\n");
goto err_get_irq;
}
rot->clock = clk_get(dev, "rotator");
if (IS_ERR_OR_NULL(rot->clock)) {
dev_err(dev, "failed to get clock\n");
ret = PTR_ERR(rot->clock);
goto err_clk_get;
}
pm_runtime_enable(dev);
ippdrv = &rot->ippdrv;
ippdrv->dev = dev;
ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
ippdrv->check_property = rotator_ippdrv_check_property;
ippdrv->start = rotator_ippdrv_start;
ret = rotator_init_prop_list(ippdrv);
if (ret < 0) {
dev_err(dev, "failed to init property list.\n");
goto err_ippdrv_register;
}
DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
platform_set_drvdata(pdev, rot);
ret = exynos_drm_ippdrv_register(ippdrv);
if (ret < 0) {
dev_err(dev, "failed to register drm rotator device\n");
goto err_ippdrv_register;
}
dev_info(dev, "The exynos rotator is probed successfully\n");
return 0;
err_ippdrv_register:
devm_kfree(dev, ippdrv->prop_list);
pm_runtime_disable(dev);
clk_put(rot->clock);
err_clk_get:
free_irq(rot->irq, rot);
err_get_irq:
devm_iounmap(dev, rot->regs);
err_get_resource:
devm_kfree(dev, rot);
return ret;
}
static int __devexit rotator_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rot_context *rot = dev_get_drvdata(dev);
struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
devm_kfree(dev, ippdrv->prop_list);
exynos_drm_ippdrv_unregister(ippdrv);
pm_runtime_disable(dev);
clk_put(rot->clock);
free_irq(rot->irq, rot);
devm_iounmap(dev, rot->regs);
devm_kfree(dev, rot);
return 0;
}
struct rot_limit_table rot_limit_tbl = {
.ycbcr420_2p = {
.min_w = 32,
.min_h = 32,
.max_w = SZ_32K,
.max_h = SZ_32K,
.align = 3,
},
.rgb888 = {
.min_w = 8,
.min_h = 8,
.max_w = SZ_8K,
.max_h = SZ_8K,
.align = 2,
},
};
struct platform_device_id rotator_driver_ids[] = {
{
.name = "exynos-rot",
.driver_data = (unsigned long)&rot_limit_tbl,
},
{},
};
static int rotator_clk_crtl(struct rot_context *rot, bool enable)
{
DRM_DEBUG_KMS("%s\n", __func__);
if (enable) {
clk_enable(rot->clock);
rot->suspended = false;
} else {
clk_disable(rot->clock);
rot->suspended = true;
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int rotator_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
DRM_DEBUG_KMS("%s\n", __func__);
if (pm_runtime_suspended(dev))
return 0;
return rotator_clk_crtl(rot, false);
}
static int rotator_resume(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
DRM_DEBUG_KMS("%s\n", __func__);
if (!pm_runtime_suspended(dev))
return rotator_clk_crtl(rot, true);
return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
DRM_DEBUG_KMS("%s\n", __func__);
return rotator_clk_crtl(rot, false);
}
static int rotator_runtime_resume(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
DRM_DEBUG_KMS("%s\n", __func__);
return rotator_clk_crtl(rot, true);
}
#endif
static const struct dev_pm_ops rotator_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
NULL)
};
struct platform_driver rotator_driver = {
.probe = rotator_probe,
.remove = __devexit_p(rotator_remove),
.id_table = rotator_driver_ids,
.driver = {
.name = "exynos-rot",
.owner = THIS_MODULE,
.pm = &rotator_pm_ops,
},
};
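
The file above only defines rotator_driver; a registration sketch under the usual platform driver pattern (an assumption worth flagging: in this series the driver appears to be registered from the exynos_drm core rather than by its own module_init, which is why no init/exit boilerplate is present):

	/* hypothetical standalone registration, for illustration only */
	static int __init rotator_init(void)
	{
		return platform_driver_register(&rotator_driver);
	}
	module_init(rotator_init);

	static void __exit rotator_exit(void)
	{
		platform_driver_unregister(&rotator_driver);
	}
	module_exit(rotator_exit);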

drivers/gpu/drm/exynos/exynos_drm_rotator.h

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
*
* Authors:
* YoungJun Cho <yj44.cho@samsung.com>
* Eunchul Kim <chulspro.kim@samsung.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _EXYNOS_DRM_ROTATOR_H_
#define _EXYNOS_DRM_ROTATOR_H_
/* TODO */
#endif

drivers/gpu/drm/exynos/exynos_drm_vidi.c

@@ -39,7 +39,6 @@ struct vidi_win_data {
	unsigned int		fb_height;
	unsigned int		bpp;
	dma_addr_t		dma_addr;
-	void __iomem		*vaddr;
	unsigned int		buf_offsize;
	unsigned int		line_size;	/* bytes */
	bool			enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
	win_data->fb_width = overlay->fb_width;
	win_data->fb_height = overlay->fb_height;
	win_data->dma_addr = overlay->dma_addr[0] + offset;
-	win_data->vaddr = overlay->vaddr[0] + offset;
	win_data->bpp = overlay->bpp;
	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
				(overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
			win_data->offset_x, win_data->offset_y);
	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
			win_data->ovl_width, win_data->ovl_height);
-	DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-			(unsigned long)win_data->dma_addr,
-			(unsigned long)win_data->vaddr);
+	DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
			overlay->fb_width, overlay->crtc_width);
 }
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
	struct drm_pending_vblank_event *e, *t;
	struct timeval now;
	unsigned long flags;
-	bool is_checked = false;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
		if (crtc != e->pipe)
			continue;

-		is_checked = true;
-
		do_gettimeofday(&now);
		e->event.sequence = 0;
		e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
+		drm_vblank_put(drm_dev, crtc);
	}

-	if (is_checked) {
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
-		/*
-		 * don't off vblank if vblank_disable_allowed is 1,
-		 * because vblank would be off by timer handler.
-		 */
-		if (!drm_dev->vblank_disable_allowed)
-			drm_vblank_off(drm_dev, crtc);
-	}

	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

drivers/gpu/drm/exynos/exynos_hdmi.c

@@ -50,6 +50,29 @@
#define MAX_HEIGHT		1080
#define get_hdmi_context(dev)	platform_get_drvdata(to_platform_device(dev))
/* AVI header and aspect ratio */
#define HDMI_AVI_VERSION 0x02
#define HDMI_AVI_LENGTH 0x0D
#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
/* AUI header info */
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
enum HDMI_PACKET_TYPE {
/* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
/* InfoFrame packet type */
HDMI_PACKET_TYPE_INFOFRAME = 0x80,
/* Vendor-Specific InfoFrame */
HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
/* Auxiliary Video information InfoFrame */
HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
/* Audio information InfoFrame */
HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
};
enum hdmi_type {
	HDMI_TYPE13,
	HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
	struct mutex			hdmi_mutex;

	void __iomem			*regs;
+	void				*parent_ctx;
	int				external_irq;
	int				internal_irq;
@@ -84,7 +108,6 @@ struct hdmi_context {
	int cur_conf;

	struct hdmi_resources		res;
-	void				*parent_ctx;

	int				hpd_gpio;
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
	int height;
	int vrefresh;
	bool interlace;
+	int cea_video_id;
	const u8 *hdmiphy_data;
	const struct hdmi_v13_preset_conf *conf;
};
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
};

static const struct hdmi_v13_conf hdmi_v13_confs[] = {
-	{ 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-	{ 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-	{ 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
-	{ 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
-	{ 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
-		 &hdmi_v13_conf_1080p50 },
-	{ 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
-	{ 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
-		 &hdmi_v13_conf_1080p60 },
+	{ 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+			&hdmi_v13_conf_720p60 },
+	{ 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+			&hdmi_v13_conf_720p60 },
+	{ 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+			&hdmi_v13_conf_480p },
+	{ 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+			&hdmi_v13_conf_1080i50 },
+	{ 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+			&hdmi_v13_conf_1080p50 },
+	{ 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+			&hdmi_v13_conf_1080i60 },
+	{ 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+			&hdmi_v13_conf_1080p60 },
};

/* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
	int height;
	int vrefresh;
	bool interlace;
+	int cea_video_id;
	const u8 *hdmiphy_data;
	const struct hdmi_preset_conf *conf;
};
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
};

static const struct hdmi_conf hdmi_confs[] = {
-	{ 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
-	{ 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
-	{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
-	{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
-	{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
-	{ 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
-	{ 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
-	{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+	{ 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+	{ 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+	{ 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+	{ 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+	{ 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+	{ 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+	{ 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+	{ 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
struct hdmi_infoframe {
enum HDMI_PACKET_TYPE type;
u8 ver;
u8 len;
};
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
	return hdmi_v14_conf_index(mode);
}
static u8 hdmi_chksum(struct hdmi_context *hdata,
u32 start, u8 len, u32 hdr_sum)
{
int i;
/* hdr_sum : header0 + header1 + header2
* start : start address of packet byte1
* len : packet bytes - 1 */
for (i = 0; i < len; ++i)
hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
/* return 2's complement of 8 bit hdr_sum */
return (u8)(~(hdr_sum & 0xff) + 1);
}
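/*
 * Editor's worked example (hypothetical payload): for an AVI InfoFrame,
 * hdr_sum = 0x82 + 0x02 + 0x0D = 0x91. If the payload bytes read back
 * from HDMI_AVI_BYTE(1..len) sum to 0x6f, the running total is 0x100 and
 * (u8)(~(0x100 & 0xff) + 1) = 0x00, so header + payload + checksum is 0
 * modulo 256, which is the InfoFrame checksum rule being implemented.
 */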
static void hdmi_reg_infoframe(struct hdmi_context *hdata,
struct hdmi_infoframe *infoframe)
{
u32 hdr_sum;
u8 chksum;
u32 aspect_ratio;
u32 mod;
u32 vic;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
if (hdata->dvi_mode) {
hdmi_reg_writeb(hdata, HDMI_VSI_CON,
HDMI_VSI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AVI_CON,
HDMI_AVI_CON_DO_NOT_TRANSMIT);
hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
return;
}
switch (infoframe->type) {
case HDMI_PACKET_TYPE_AVI:
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
		/* Output format hardcoded to zero, RGB YCbCr selection */
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
AVI_ACTIVE_FORMAT_VALID |
AVI_UNDERSCANNED_DISPLAY_VALID);
aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
AVI_SAME_AS_PIC_ASPECT_RATIO);
if (hdata->type == HDMI_TYPE13)
vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
else
vic = hdmi_confs[hdata->cur_conf].cea_video_id;
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
infoframe->len, hdr_sum);
DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
break;
case HDMI_PACKET_TYPE_AUI:
hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
infoframe->len, hdr_sum);
DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
break;
default:
break;
}
}
static bool hdmi_is_connected(void *ctx)
{
	struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
		DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
			(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
			raw_edid->width_cm, raw_edid->height_cm);
+		kfree(raw_edid);
	} else {
		return -ENODEV;
	}
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)

static void hdmi_conf_init(struct hdmi_context *hdata)
{
+	struct hdmi_infoframe infoframe;
+
	/* disable HPD interrupts */
	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
	} else {
+		infoframe.type = HDMI_PACKET_TYPE_AVI;
+		infoframe.ver = HDMI_AVI_VERSION;
+		infoframe.len = HDMI_AVI_LENGTH;
+		hdmi_reg_infoframe(hdata, &infoframe);
+
+		infoframe.type = HDMI_PACKET_TYPE_AUI;
+		infoframe.ver = HDMI_AUI_VERSION;
+		infoframe.len = HDMI_AUI_LENGTH;
+		hdmi_reg_infoframe(hdata, &infoframe);
+
		/* enable AVI packet every vsync, fixes purple line problem */
-		hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
-		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
		hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
	}
}
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
	mdelay(10);
}
static void hdmiphy_poweron(struct hdmi_context *hdata)
{
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
if (hdata->type == HDMI_TYPE14)
hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
HDMI_PHY_POWER_OFF_EN);
}
static void hdmiphy_poweroff(struct hdmi_context *hdata)
{
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
if (hdata->type == HDMI_TYPE14)
hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
HDMI_PHY_POWER_OFF_EN);
}
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
	const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
			index = hdmi_v14_conf_index(m);

		if (index >= 0) {
+			struct drm_mode_object base;
+			struct list_head head;
+
			DRM_INFO("desired mode doesn't exist so\n");
			DRM_INFO("use the most suitable mode among modes.\n");

+			/* preserve display mode header while copying. */
+			head = adjusted_mode->head;
+			base = adjusted_mode->base;
			memcpy(adjusted_mode, m, sizeof(*m));
+			adjusted_mode->head = head;
+			adjusted_mode->base = base;
+
			break;
		}
	}
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)

	mutex_unlock(&hdata->hdmi_mutex);

-	pm_runtime_get_sync(hdata->dev);
	regulator_bulk_enable(res->regul_count, res->regul_bulk);
	clk_enable(res->hdmiphy);
	clk_enable(res->hdmi);
	clk_enable(res->sclk_hdmi);
+
+	hdmiphy_poweron(hdata);
}
static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
	 * its reset state seems to meet the condition.
	 */
	hdmiphy_conf_reset(hdata);
+	hdmiphy_poweroff(hdata);

	clk_disable(res->sclk_hdmi);
	clk_disable(res->hdmi);
	clk_disable(res->hdmiphy);
	regulator_bulk_disable(res->regul_count, res->regul_bulk);

-	pm_runtime_put_sync(hdata->dev);
-
	mutex_lock(&hdata->hdmi_mutex);
	hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
{
	struct hdmi_context *hdata = ctx;

-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+	DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
-		hdmi_poweron(hdata);
+		if (pm_runtime_suspended(hdata->dev))
+			pm_runtime_get_sync(hdata->dev);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
-		hdmi_poweroff(hdata);
+		if (!pm_runtime_suspended(hdata->dev))
+			pm_runtime_put_sync(hdata->dev);
		break;
	default:
		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
	memset(res, 0, sizeof(*res));

	/* get clocks, power */
-	res->hdmi = clk_get(dev, "hdmi");
+	res->hdmi = devm_clk_get(dev, "hdmi");
	if (IS_ERR_OR_NULL(res->hdmi)) {
		DRM_ERROR("failed to get clock 'hdmi'\n");
		goto fail;
	}
-	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+	res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
	if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
		DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
		goto fail;
	}
-	res->sclk_pixel = clk_get(dev, "sclk_pixel");
+	res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
	if (IS_ERR_OR_NULL(res->sclk_pixel)) {
		DRM_ERROR("failed to get clock 'sclk_pixel'\n");
		goto fail;
	}
-	res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+	res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
	if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
		DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
		goto fail;
	}
-	res->hdmiphy = clk_get(dev, "hdmiphy");
+	res->hdmiphy = devm_clk_get(dev, "hdmiphy");
	if (IS_ERR_OR_NULL(res->hdmiphy)) {
		DRM_ERROR("failed to get clock 'hdmiphy'\n");
		goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)

	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);

-	res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+	res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
		sizeof(res->regul_bulk[0]), GFP_KERNEL);
	if (!res->regul_bulk) {
		DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
		res->regul_bulk[i].supply = supply[i];
		res->regul_bulk[i].consumer = NULL;
	}
-	ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
	if (ret) {
		DRM_ERROR("failed to get regulators\n");
		goto fail;
@@ -2217,28 +2373,6 @@ fail:
	return -ENODEV;
}

-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
-	struct hdmi_resources *res = &hdata->res;
-
-	regulator_bulk_free(res->regul_count, res->regul_bulk);
-	/* kfree is NULL-safe */
-	kfree(res->regul_bulk);
-	if (!IS_ERR_OR_NULL(res->hdmiphy))
-		clk_put(res->hdmiphy);
-	if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
-		clk_put(res->sclk_hdmiphy);
-	if (!IS_ERR_OR_NULL(res->sclk_pixel))
-		clk_put(res->sclk_pixel);
-	if (!IS_ERR_OR_NULL(res->sclk_hdmi))
-		clk_put(res->sclk_hdmi);
-	if (!IS_ERR_OR_NULL(res->hdmi))
-		clk_put(res->hdmi);
-	memset(res, 0, sizeof(*res));
-
-	return 0;
-}
-
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;

void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
	}
};

+#ifdef CONFIG_OF
static struct of_device_id hdmi_match_types[] = {
	{
		.compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
		/* end node */
	}
};
+#endif
static int __devinit hdmi_probe(struct platform_device *pdev)
{
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
		const struct of_device_id *match;
		match = of_match_node(of_match_ptr(hdmi_match_types),
					pdev->dev.of_node);
+		if (match == NULL)
+			return -ENODEV;
		hdata->type = (enum hdmi_type)match->data;
	} else {
		hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)

	ret = hdmi_resources_init(hdata);
	if (ret) {
-		ret = -EINVAL;
		DRM_ERROR("hdmi_resources_init failed\n");
-		goto err_data;
+		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		DRM_ERROR("failed to find registers\n");
-		ret = -ENOENT;
-		goto err_resource;
+		return -ENOENT;
	}

	hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
	if (!hdata->regs) {
		DRM_ERROR("failed to map registers\n");
-		ret = -ENXIO;
-		goto err_resource;
+		return -ENXIO;
	}

-	ret = gpio_request(hdata->hpd_gpio, "HPD");
+	ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
	if (ret) {
		DRM_ERROR("failed to request HPD gpio\n");
-		goto err_resource;
+		return ret;
	}

	/* DDC i2c driver */
	if (i2c_add_driver(&ddc_driver)) {
		DRM_ERROR("failed to register ddc i2c driver\n");
-		ret = -ENOENT;
-		goto err_gpio;
+		return -ENOENT;
	}

	hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
	i2c_del_driver(&hdmiphy_driver);
err_ddc:
	i2c_del_driver(&ddc_driver);
-err_gpio:
-	gpio_free(hdata->hpd_gpio);
-err_resource:
-	hdmi_resources_cleanup(hdata);
-err_data:
	return ret;
}
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
	free_irq(hdata->internal_irq, hdata);
	free_irq(hdata->external_irq, hdata);

-	gpio_free(hdata->hpd_gpio);
-
-	hdmi_resources_cleanup(hdata);
-
	/* hdmiphy i2c driver */
	i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
	struct hdmi_context *hdata = ctx->ctx;

+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
	disable_irq(hdata->internal_irq);
	disable_irq(hdata->external_irq);
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
	if (ctx->drm_dev)
		drm_helper_hpd_irq_event(ctx->drm_dev);

+	if (pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+		return 0;
+	}
+
	hdmi_poweroff(hdata);

	return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
	struct hdmi_context *hdata = ctx->ctx;

+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
	enable_irq(hdata->external_irq);
	enable_irq(hdata->internal_irq);
+
+	if (!pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+		return 0;
+	}
+
+	hdmi_poweron(hdata);
+
	return 0;
}
#endif

-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+#ifdef CONFIG_PM_RUNTIME
static int hdmi_runtime_suspend(struct device *dev)
{
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmi_poweroff(hdata);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
struct hdmi_context *hdata = ctx->ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmi_poweron(hdata);
return 0;
}
#endif
static const struct dev_pm_ops hdmi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
};
struct platform_driver hdmi_driver = {
	.probe		= hdmi_probe,
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
		.name	= "exynos-hdmi",
		.owner	= THIS_MODULE,
		.pm	= &hdmi_pm_ops,
-		.of_match_table = hdmi_match_types,
+		.of_match_table = of_match_ptr(hdmi_match_types),
	},
};

drivers/gpu/drm/exynos/exynos_hdmiphy.c

@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
	{ },
};

+#ifdef CONFIG_OF
static struct of_device_id hdmiphy_match_types[] = {
	{
		.compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
		/* end node */
	}
};
+#endif

struct i2c_driver hdmiphy_driver = {
	.driver = {
		.name	= "exynos-hdmiphy",
		.owner	= THIS_MODULE,
-		.of_match_table = hdmiphy_match_types,
+		.of_match_table = of_match_ptr(hdmiphy_match_types),
	},
	.id_table = hdmiphy_id,
	.probe = hdmiphy_probe,

drivers/gpu/drm/exynos/exynos_mixer.c

@@ -36,14 +36,13 @@

#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"

#define get_mixer_context(dev)	platform_get_drvdata(to_platform_device(dev))

struct hdmi_win_data {
	dma_addr_t		dma_addr;
-	void __iomem		*vaddr;
	dma_addr_t		chroma_dma_addr;
-	void __iomem		*chroma_vaddr;
	uint32_t		pixel_format;
	unsigned int		bpp;
	unsigned int		crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
	unsigned int		mode_width;
	unsigned int		mode_height;
	unsigned int		scan_flags;
+	bool			enabled;
+	bool			resume;
};

struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
struct mixer_context {
	struct device		*dev;
+	struct drm_device	*drm_dev;
	int			pipe;
	bool			interlace;
	bool			powered;
@@ -90,6 +92,9 @@ struct mixer_context {
	struct mixer_resources	mixer_res;
	struct hdmi_win_data	win_data[MIXER_WIN_NR];
	enum mixer_version_id	mxr_ver;
+	void			*parent_ctx;
+	wait_queue_head_t	wait_vsync_queue;
+	atomic_t		wait_vsync_event;
};

struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
	spin_unlock_irqrestore(&res->reg_slock, flags);
}

-static void mixer_poweron(struct mixer_context *ctx)
-{
-	struct mixer_resources *res = &ctx->mixer_res;
-
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-	mutex_lock(&ctx->mixer_mutex);
-	if (ctx->powered) {
-		mutex_unlock(&ctx->mixer_mutex);
-		return;
-	}
-	ctx->powered = true;
-	mutex_unlock(&ctx->mixer_mutex);
-
-	pm_runtime_get_sync(ctx->dev);
-
-	clk_enable(res->mixer);
-	if (ctx->vp_enabled) {
-		clk_enable(res->vp);
-		clk_enable(res->sclk_mixer);
-	}
-
-	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
-	mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
-{
-	struct mixer_resources *res = &ctx->mixer_res;
-
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-	mutex_lock(&ctx->mixer_mutex);
-	if (!ctx->powered)
-		goto out;
-	mutex_unlock(&ctx->mixer_mutex);
-
-	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
-
-	clk_disable(res->mixer);
-	if (ctx->vp_enabled) {
-		clk_disable(res->vp);
-		clk_disable(res->sclk_mixer);
-	}
-
-	pm_runtime_put_sync(ctx->dev);
-
-	mutex_lock(&ctx->mixer_mutex);
-	ctx->powered = false;
-
-out:
-	mutex_unlock(&ctx->mixer_mutex);
+static int mixer_iommu_on(void *ctx, bool enable)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+	struct mixer_context *mdata = ctx;
+	struct drm_device *drm_dev;
+
+	drm_hdmi_ctx = mdata->parent_ctx;
+	drm_dev = drm_hdmi_ctx->drm_dev;
+
+	if (is_drm_iommu_supported(drm_dev)) {
+		if (enable)
+			return drm_iommu_attach_device(drm_dev, mdata->dev);
+
+		drm_iommu_detach_device(drm_dev, mdata->dev);
+	}
+
+	return 0;
}

static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}

-static void mixer_dpms(void *ctx, int mode)
-{
-	struct mixer_context *mixer_ctx = ctx;
-
-	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		mixer_poweron(mixer_ctx);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		mixer_poweroff(mixer_ctx);
-		break;
-	default:
-		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
-		break;
-	}
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
-	struct mixer_context *mixer_ctx = ctx;
-	struct mixer_resources *res = &mixer_ctx->mixer_res;
-	int ret;
-
-	ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
-				MXR_INT_STATUS_VSYNC), 50);
-	if (ret < 0)
-		DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
static void mixer_win_mode_set(void *ctx,
			struct exynos_drm_overlay *overlay)
{
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
	win_data = &mixer_ctx->win_data[win];

	win_data->dma_addr = overlay->dma_addr[0];
-	win_data->vaddr = overlay->vaddr[0];
	win_data->chroma_dma_addr = overlay->dma_addr[1];
-	win_data->chroma_vaddr = overlay->vaddr[1];
	win_data->pixel_format = overlay->pixel_format;
	win_data->bpp = overlay->bpp;
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
		vp_video_buffer(mixer_ctx, win);
	else
		mixer_graph_buffer(mixer_ctx, win);
+
+	mixer_ctx->win_data[win].enabled = true;
}
static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)

	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);

+	mutex_lock(&mixer_ctx->mixer_mutex);
+	if (!mixer_ctx->powered) {
+		mutex_unlock(&mixer_ctx->mixer_mutex);
+		mixer_ctx->win_data[win].resume = false;
+		return;
+	}
+	mutex_unlock(&mixer_ctx->mixer_mutex);
+
	spin_lock_irqsave(&res->reg_slock, flags);
	mixer_vsync_set_update(mixer_ctx, false);
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
	mixer_vsync_set_update(mixer_ctx, true);
	spin_unlock_irqrestore(&res->reg_slock, flags);
+
+	mixer_ctx->win_data[win].enabled = false;
}
static void mixer_wait_for_vblank(void *ctx)
{
struct mixer_context *mixer_ctx = ctx;
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
mutex_unlock(&mixer_ctx->mixer_mutex);
return;
}
mutex_unlock(&mixer_ctx->mixer_mutex);
atomic_set(&mixer_ctx->wait_vsync_event, 1);
/*
* wait for MIXER to signal VSYNC interrupt or return after
* timeout which is set to 50ms (refresh rate of 20).
*/
if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
!atomic_read(&mixer_ctx->wait_vsync_event),
DRM_HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
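/*
 * Editor's note: this is the usual flag-plus-waitqueue handshake. The
 * waiter raises wait_vsync_event and sleeps; the VSYNC interrupt handler
 * (see mixer_irq_handler below) clears the flag and calls DRM_WAKEUP().
 * DRM_HZ/20 is a 50 ms timeout, i.e. one full frame even at 20 Hz.
 */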
static void mixer_window_suspend(struct mixer_context *ctx)
{
struct hdmi_win_data *win_data;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
win_data = &ctx->win_data[i];
win_data->resume = win_data->enabled;
mixer_win_disable(ctx, i);
}
mixer_wait_for_vblank(ctx);
}
static void mixer_window_resume(struct mixer_context *ctx)
{
struct hdmi_win_data *win_data;
int i;
for (i = 0; i < MIXER_WIN_NR; i++) {
win_data = &ctx->win_data[i];
win_data->enabled = win_data->resume;
win_data->resume = false;
}
}
static void mixer_poweron(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
mutex_lock(&ctx->mixer_mutex);
if (ctx->powered) {
mutex_unlock(&ctx->mixer_mutex);
return;
}
ctx->powered = true;
mutex_unlock(&ctx->mixer_mutex);
clk_enable(res->mixer);
if (ctx->vp_enabled) {
clk_enable(res->vp);
clk_enable(res->sclk_mixer);
}
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
mixer_window_resume(ctx);
}
static void mixer_poweroff(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
mutex_lock(&ctx->mixer_mutex);
if (!ctx->powered)
goto out;
mutex_unlock(&ctx->mixer_mutex);
mixer_window_suspend(ctx);
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
clk_disable(res->mixer);
if (ctx->vp_enabled) {
clk_disable(res->vp);
clk_disable(res->sclk_mixer);
}
mutex_lock(&ctx->mixer_mutex);
ctx->powered = false;
out:
mutex_unlock(&ctx->mixer_mutex);
}
static void mixer_dpms(void *ctx, int mode)
{
struct mixer_context *mixer_ctx = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
switch (mode) {
case DRM_MODE_DPMS_ON:
if (pm_runtime_suspended(mixer_ctx->dev))
pm_runtime_get_sync(mixer_ctx->dev);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (!pm_runtime_suspended(mixer_ctx->dev))
pm_runtime_put_sync(mixer_ctx->dev);
break;
default:
DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
break;
}
} }
static struct exynos_mixer_ops mixer_ops = {
	/* manager */
+	.iommu_on		= mixer_iommu_on,
	.enable_vblank		= mixer_enable_vblank,
	.disable_vblank		= mixer_disable_vblank,
-	.wait_for_vblank	= mixer_wait_for_vblank,
	.dpms			= mixer_dpms,

	/* overlay */
+	.wait_for_vblank	= mixer_wait_for_vblank,
	.win_mode_set		= mixer_win_mode_set,
	.win_commit		= mixer_win_commit,
	.win_disable		= mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
	struct drm_pending_vblank_event *e, *t;
	struct timeval now;
	unsigned long flags;
-	bool is_checked = false;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
		if (crtc != e->pipe)
			continue;

-		is_checked = true;
		do_gettimeofday(&now);
		e->event.sequence = 0;
		e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)

		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
+		drm_vblank_put(drm_dev, crtc);
	}

-	if (is_checked)
-		/*
-		 * call drm_vblank_put only in case that drm_vblank_get was
-		 * called.
-		 */
-		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-			drm_vblank_put(drm_dev, crtc);
-
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
			drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
			mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+
+			/* set wait vsync event to zero and wake up queue. */
+			if (atomic_read(&ctx->wait_vsync_event)) {
+				atomic_set(&ctx->wait_vsync_event, 0);
+				DRM_WAKEUP(&ctx->wait_vsync_queue);
+			}
		}

out:
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,

	spin_lock_init(&mixer_res->reg_slock);

-	mixer_res->mixer = clk_get(dev, "mixer");
+	mixer_res->mixer = devm_clk_get(dev, "mixer");
	if (IS_ERR_OR_NULL(mixer_res->mixer)) {
		dev_err(dev, "failed to get clock 'mixer'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
	}

-	mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+	mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
	if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
	}

	mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
							resource_size(res));
	if (mixer_res->mixer_regs == NULL) {
		dev_err(dev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(dev, "get interrupt resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
	}

	ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
							0, "drm_mixer", ctx);
	if (ret) {
		dev_err(dev, "request interrupt failed.\n");
-		goto fail;
+		return ret;
	}
	mixer_res->irq = res->start;

	return 0;
-
-fail:
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
-		clk_put(mixer_res->sclk_hdmi);
-	if (!IS_ERR_OR_NULL(mixer_res->mixer))
-		clk_put(mixer_res->mixer);
-	return ret;
}
 static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
 	struct device *dev = &pdev->dev;
 	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
 	struct resource *res;
-	int ret;
-	mixer_res->vp = clk_get(dev, "vp");
+	mixer_res->vp = devm_clk_get(dev, "vp");
 	if (IS_ERR_OR_NULL(mixer_res->vp)) {
 		dev_err(dev, "failed to get clock 'vp'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
-	mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+	mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
 	if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
 		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
-	mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+	mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
 	if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
 		dev_err(dev, "failed to get clock 'sclk_dac'\n");
-		ret = -ENODEV;
-		goto fail;
+		return -ENODEV;
 	}
 	if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (res == NULL) {
 		dev_err(dev, "get memory resource failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 	mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
 							resource_size(res));
 	if (mixer_res->vp_regs == NULL) {
 		dev_err(dev, "register mapping failed.\n");
-		ret = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 	return 0;
-
-fail:
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
-		clk_put(mixer_res->sclk_dac);
-	if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
-		clk_put(mixer_res->sclk_mixer);
-	if (!IS_ERR_OR_NULL(mixer_res->vp))
-		clk_put(mixer_res->vp);
-	return ret;
 }
 static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
 	}
 	ctx->dev = &pdev->dev;
+	ctx->parent_ctx = (void *)drm_hdmi_ctx;
 	drm_hdmi_ctx->ctx = (void *)ctx;
 	ctx->vp_enabled = drv->is_vp_enabled;
 	ctx->mxr_ver = drv->version;
+	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+	atomic_set(&ctx->wait_vsync_event, 0);
 	platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
 	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
 	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+		return 0;
+	}
+
 	mixer_poweroff(ctx);
 	return 0;
 }
+
+static int mixer_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (!pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+		return 0;
+	}
+
+	mixer_poweron(ctx);
+
+	return 0;
+}
 #endif
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mixer_poweroff(ctx);
+
+	return 0;
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mixer_poweron(ctx);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops mixer_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+	SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
 
 struct platform_driver mixer_driver = {
 	.driver = {
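
The mixer PM rework above trades SIMPLE_DEV_PM_OPS (system sleep only) for an explicit dev_pm_ops that wires up system sleep and runtime PM together. A minimal sketch of the same pattern with placeholder foo_* names (not from this driver); the pm_runtime_suspended() checks are what keep the two paths from powering the hardware down or up twice:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_power(struct device *dev, bool on)
{
	return 0;	/* stub: clock and register handling would go here */
}

static int foo_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))	/* runtime PM already powered down */
		return 0;
	return foo_power(dev, false);
}

static int foo_resume(struct device *dev)
{
	if (!pm_runtime_suspended(dev))	/* never powered down, nothing to do */
		return 0;
	return foo_power(dev, true);
}

static int foo_runtime_suspend(struct device *dev)
{
	return foo_power(dev, false);
}

static int foo_runtime_resume(struct device *dev)
{
	return foo_power(dev, true);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};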

View File

@@ -0,0 +1,669 @@
/* drivers/gpu/drm/exynos/regs-fimc.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Register definition file for Samsung Camera Interface (FIMC) driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef EXYNOS_REGS_FIMC_H
#define EXYNOS_REGS_FIMC_H
/*
* Register part
*/
/* Input source format */
#define EXYNOS_CISRCFMT (0x00)
/* Window offset */
#define EXYNOS_CIWDOFST (0x04)
/* Global control */
#define EXYNOS_CIGCTRL (0x08)
/* Window offset 2 */
#define EXYNOS_CIWDOFST2 (0x14)
/* Y 1st frame start address for output DMA */
#define EXYNOS_CIOYSA1 (0x18)
/* Y 2nd frame start address for output DMA */
#define EXYNOS_CIOYSA2 (0x1c)
/* Y 3rd frame start address for output DMA */
#define EXYNOS_CIOYSA3 (0x20)
/* Y 4th frame start address for output DMA */
#define EXYNOS_CIOYSA4 (0x24)
/* Cb 1st frame start address for output DMA */
#define EXYNOS_CIOCBSA1 (0x28)
/* Cb 2nd frame start address for output DMA */
#define EXYNOS_CIOCBSA2 (0x2c)
/* Cb 3rd frame start address for output DMA */
#define EXYNOS_CIOCBSA3 (0x30)
/* Cb 4th frame start address for output DMA */
#define EXYNOS_CIOCBSA4 (0x34)
/* Cr 1st frame start address for output DMA */
#define EXYNOS_CIOCRSA1 (0x38)
/* Cr 2nd frame start address for output DMA */
#define EXYNOS_CIOCRSA2 (0x3c)
/* Cr 3rd frame start address for output DMA */
#define EXYNOS_CIOCRSA3 (0x40)
/* Cr 4th frame start address for output DMA */
#define EXYNOS_CIOCRSA4 (0x44)
/* Target image format */
#define EXYNOS_CITRGFMT (0x48)
/* Output DMA control */
#define EXYNOS_CIOCTRL (0x4c)
/* Pre-scaler control 1 */
#define EXYNOS_CISCPRERATIO (0x50)
/* Pre-scaler control 2 */
#define EXYNOS_CISCPREDST (0x54)
/* Main scaler control */
#define EXYNOS_CISCCTRL (0x58)
/* Target area */
#define EXYNOS_CITAREA (0x5c)
/* Status */
#define EXYNOS_CISTATUS (0x64)
/* Status2 */
#define EXYNOS_CISTATUS2 (0x68)
/* Image capture enable command */
#define EXYNOS_CIIMGCPT (0xc0)
/* Capture sequence */
#define EXYNOS_CICPTSEQ (0xc4)
/* Image effects */
#define EXYNOS_CIIMGEFF (0xd0)
/* Y frame start address for input DMA */
#define EXYNOS_CIIYSA0 (0xd4)
/* Cb frame start address for input DMA */
#define EXYNOS_CIICBSA0 (0xd8)
/* Cr frame start address for input DMA */
#define EXYNOS_CIICRSA0 (0xdc)
/* Input DMA Y Line Skip */
#define EXYNOS_CIILINESKIP_Y (0xec)
/* Input DMA Cb Line Skip */
#define EXYNOS_CIILINESKIP_CB (0xf0)
/* Input DMA Cr Line Skip */
#define EXYNOS_CIILINESKIP_CR (0xf4)
/* Real input DMA image size */
#define EXYNOS_CIREAL_ISIZE (0xf8)
/* Input DMA control */
#define EXYNOS_MSCTRL (0xfc)
/* Y frame start address for input DMA */
#define EXYNOS_CIIYSA1 (0x144)
/* Cb frame start address for input DMA */
#define EXYNOS_CIICBSA1 (0x148)
/* Cr frame start address for input DMA */
#define EXYNOS_CIICRSA1 (0x14c)
/* Output DMA Y offset */
#define EXYNOS_CIOYOFF (0x168)
/* Output DMA CB offset */
#define EXYNOS_CIOCBOFF (0x16c)
/* Output DMA CR offset */
#define EXYNOS_CIOCROFF (0x170)
/* Input DMA Y offset */
#define EXYNOS_CIIYOFF (0x174)
/* Input DMA CB offset */
#define EXYNOS_CIICBOFF (0x178)
/* Input DMA CR offset */
#define EXYNOS_CIICROFF (0x17c)
/* Input DMA original image size */
#define EXYNOS_ORGISIZE (0x180)
/* Output DMA original image size */
#define EXYNOS_ORGOSIZE (0x184)
/* Real output DMA image size */
#define EXYNOS_CIEXTEN (0x188)
/* DMA parameter */
#define EXYNOS_CIDMAPARAM (0x18c)
/* MIPI CSI image format */
#define EXYNOS_CSIIMGFMT (0x194)
/* FIMC Clock Source Select */
#define EXYNOS_MISC_FIMC (0x198)
/* Added for FIMC v5.1 */
/* Output Frame Buffer Sequence */
#define EXYNOS_CIFCNTSEQ (0x1fc)
/* Y 5th frame start address for output DMA */
#define EXYNOS_CIOYSA5 (0x200)
/* Y 6th frame start address for output DMA */
#define EXYNOS_CIOYSA6 (0x204)
/* Y 7th frame start address for output DMA */
#define EXYNOS_CIOYSA7 (0x208)
/* Y 8th frame start address for output DMA */
#define EXYNOS_CIOYSA8 (0x20c)
/* Y 9th frame start address for output DMA */
#define EXYNOS_CIOYSA9 (0x210)
/* Y 10th frame start address for output DMA */
#define EXYNOS_CIOYSA10 (0x214)
/* Y 11th frame start address for output DMA */
#define EXYNOS_CIOYSA11 (0x218)
/* Y 12th frame start address for output DMA */
#define EXYNOS_CIOYSA12 (0x21c)
/* Y 13th frame start address for output DMA */
#define EXYNOS_CIOYSA13 (0x220)
/* Y 14th frame start address for output DMA */
#define EXYNOS_CIOYSA14 (0x224)
/* Y 15th frame start address for output DMA */
#define EXYNOS_CIOYSA15 (0x228)
/* Y 16th frame start address for output DMA */
#define EXYNOS_CIOYSA16 (0x22c)
/* Y 17th frame start address for output DMA */
#define EXYNOS_CIOYSA17 (0x230)
/* Y 18th frame start address for output DMA */
#define EXYNOS_CIOYSA18 (0x234)
/* Y 19th frame start address for output DMA */
#define EXYNOS_CIOYSA19 (0x238)
/* Y 20th frame start address for output DMA */
#define EXYNOS_CIOYSA20 (0x23c)
/* Y 21st frame start address for output DMA */
#define EXYNOS_CIOYSA21 (0x240)
/* Y 22nd frame start address for output DMA */
#define EXYNOS_CIOYSA22 (0x244)
/* Y 23rd frame start address for output DMA */
#define EXYNOS_CIOYSA23 (0x248)
/* Y 24th frame start address for output DMA */
#define EXYNOS_CIOYSA24 (0x24c)
/* Y 25th frame start address for output DMA */
#define EXYNOS_CIOYSA25 (0x250)
/* Y 26th frame start address for output DMA */
#define EXYNOS_CIOYSA26 (0x254)
/* Y 27th frame start address for output DMA */
#define EXYNOS_CIOYSA27 (0x258)
/* Y 28th frame start address for output DMA */
#define EXYNOS_CIOYSA28 (0x25c)
/* Y 29th frame start address for output DMA */
#define EXYNOS_CIOYSA29 (0x260)
/* Y 30th frame start address for output DMA */
#define EXYNOS_CIOYSA30 (0x264)
/* Y 31st frame start address for output DMA */
#define EXYNOS_CIOYSA31 (0x268)
/* Y 32nd frame start address for output DMA */
#define EXYNOS_CIOYSA32 (0x26c)
/* CB 5th frame start address for output DMA */
#define EXYNOS_CIOCBSA5 (0x270)
/* CB 6th frame start address for output DMA */
#define EXYNOS_CIOCBSA6 (0x274)
/* CB 7th frame start address for output DMA */
#define EXYNOS_CIOCBSA7 (0x278)
/* CB 8th frame start address for output DMA */
#define EXYNOS_CIOCBSA8 (0x27c)
/* CB 9th frame start address for output DMA */
#define EXYNOS_CIOCBSA9 (0x280)
/* CB 10th frame start address for output DMA */
#define EXYNOS_CIOCBSA10 (0x284)
/* CB 11th frame start address for output DMA */
#define EXYNOS_CIOCBSA11 (0x288)
/* CB 12th frame start address for output DMA */
#define EXYNOS_CIOCBSA12 (0x28c)
/* CB 13th frame start address for output DMA */
#define EXYNOS_CIOCBSA13 (0x290)
/* CB 14th frame start address for output DMA */
#define EXYNOS_CIOCBSA14 (0x294)
/* CB 15th frame start address for output DMA */
#define EXYNOS_CIOCBSA15 (0x298)
/* CB 16th frame start address for output DMA */
#define EXYNOS_CIOCBSA16 (0x29c)
/* CB 17th frame start address for output DMA */
#define EXYNOS_CIOCBSA17 (0x2a0)
/* CB 18th frame start address for output DMA */
#define EXYNOS_CIOCBSA18 (0x2a4)
/* CB 19th frame start address for output DMA */
#define EXYNOS_CIOCBSA19 (0x2a8)
/* CB 20th frame start address for output DMA */
#define EXYNOS_CIOCBSA20 (0x2ac)
/* CB 21st frame start address for output DMA */
#define EXYNOS_CIOCBSA21 (0x2b0)
/* CB 22nd frame start address for output DMA */
#define EXYNOS_CIOCBSA22 (0x2b4)
/* CB 23rd frame start address for output DMA */
#define EXYNOS_CIOCBSA23 (0x2b8)
/* CB 24th frame start address for output DMA */
#define EXYNOS_CIOCBSA24 (0x2bc)
/* CB 25th frame start address for output DMA */
#define EXYNOS_CIOCBSA25 (0x2c0)
/* CB 26th frame start address for output DMA */
#define EXYNOS_CIOCBSA26 (0x2c4)
/* CB 27th frame start address for output DMA */
#define EXYNOS_CIOCBSA27 (0x2c8)
/* CB 28th frame start address for output DMA */
#define EXYNOS_CIOCBSA28 (0x2cc)
/* CB 29th frame start address for output DMA */
#define EXYNOS_CIOCBSA29 (0x2d0)
/* CB 30th frame start address for output DMA */
#define EXYNOS_CIOCBSA30 (0x2d4)
/* CB 31st frame start address for output DMA */
#define EXYNOS_CIOCBSA31 (0x2d8)
/* CB 32nd frame start address for output DMA */
#define EXYNOS_CIOCBSA32 (0x2dc)
/* CR 5th frame start address for output DMA */
#define EXYNOS_CIOCRSA5 (0x2e0)
/* CR 6th frame start address for output DMA */
#define EXYNOS_CIOCRSA6 (0x2e4)
/* CR 7th frame start address for output DMA */
#define EXYNOS_CIOCRSA7 (0x2e8)
/* CR 8th frame start address for output DMA */
#define EXYNOS_CIOCRSA8 (0x2ec)
/* CR 9th frame start address for output DMA */
#define EXYNOS_CIOCRSA9 (0x2f0)
/* CR 10th frame start address for output DMA */
#define EXYNOS_CIOCRSA10 (0x2f4)
/* CR 11th frame start address for output DMA */
#define EXYNOS_CIOCRSA11 (0x2f8)
/* CR 12th frame start address for output DMA */
#define EXYNOS_CIOCRSA12 (0x2fc)
/* CR 13th frame start address for output DMA */
#define EXYNOS_CIOCRSA13 (0x300)
/* CR 14th frame start address for output DMA */
#define EXYNOS_CIOCRSA14 (0x304)
/* CR 15th frame start address for output DMA */
#define EXYNOS_CIOCRSA15 (0x308)
/* CR 16th frame start address for output DMA */
#define EXYNOS_CIOCRSA16 (0x30c)
/* CR 17th frame start address for output DMA */
#define EXYNOS_CIOCRSA17 (0x310)
/* CR 18th frame start address for output DMA */
#define EXYNOS_CIOCRSA18 (0x314)
/* CR 19th frame start address for output DMA */
#define EXYNOS_CIOCRSA19 (0x318)
/* CR 20th frame start address for output DMA */
#define EXYNOS_CIOCRSA20 (0x31c)
/* CR 21st frame start address for output DMA */
#define EXYNOS_CIOCRSA21 (0x320)
/* CR 22nd frame start address for output DMA */
#define EXYNOS_CIOCRSA22 (0x324)
/* CR 23rd frame start address for output DMA */
#define EXYNOS_CIOCRSA23 (0x328)
/* CR 24th frame start address for output DMA */
#define EXYNOS_CIOCRSA24 (0x32c)
/* CR 25th frame start address for output DMA */
#define EXYNOS_CIOCRSA25 (0x330)
/* CR 26th frame start address for output DMA */
#define EXYNOS_CIOCRSA26 (0x334)
/* CR 27th frame start address for output DMA */
#define EXYNOS_CIOCRSA27 (0x338)
/* CR 28th frame start address for output DMA */
#define EXYNOS_CIOCRSA28 (0x33c)
/* CR 29th frame start address for output DMA */
#define EXYNOS_CIOCRSA29 (0x340)
/* CR 30th frame start address for output DMA */
#define EXYNOS_CIOCRSA30 (0x344)
/* CR 31st frame start address for output DMA */
#define EXYNOS_CIOCRSA31 (0x348)
/* CR 32nd frame start address for output DMA */
#define EXYNOS_CIOCRSA32 (0x34c)
/*
* Macro part
*/
/* frame start address 1 ~ 4, 5 ~ 32 */
/* Number of Default PingPong Memory */
#define DEF_PP 4
#define EXYNOS_CIOYSA(__x) \
(((__x) < DEF_PP) ? \
(EXYNOS_CIOYSA1 + (__x) * 4) : \
(EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
#define EXYNOS_CIOCBSA(__x) \
(((__x) < DEF_PP) ? \
(EXYNOS_CIOCBSA1 + (__x) * 4) : \
(EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
#define EXYNOS_CIOCRSA(__x) \
(((__x) < DEF_PP) ? \
(EXYNOS_CIOCRSA1 + (__x) * 4) : \
(EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
/* Number of Default PingPong Memory */
#define DEF_IPP 1
#define EXYNOS_CIIYSA(__x) \
(((__x) < DEF_IPP) ? \
(EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
#define EXYNOS_CIICBSA(__x) \
(((__x) < DEF_IPP) ? \
(EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
#define EXYNOS_CIICRSA(__x) \
(((__x) < DEF_IPP) ? \
(EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
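
The ping-pong macros above split the 32 output frame buffers between the original register bank (CIOYSA1..4 at 0x18..0x24) and the FIMC v5.1 extension bank (CIOYSA5..32 from 0x200). A standalone sanity check of the indexing, with just the relevant defines inlined (illustrative only; build with -std=c11 for _Static_assert):

#define DEF_PP			4
#define EXYNOS_CIOYSA1		(0x18)
#define EXYNOS_CIOYSA5		(0x200)
#define EXYNOS_CIOYSA(__x) \
	(((__x) < DEF_PP) ? \
	(EXYNOS_CIOYSA1 + (__x) * 4) : \
	(EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))

_Static_assert(EXYNOS_CIOYSA(0) == 0x18, "buffer 0 maps to CIOYSA1");
_Static_assert(EXYNOS_CIOYSA(3) == 0x24, "buffer 3 maps to CIOYSA4");
_Static_assert(EXYNOS_CIOYSA(4) == 0x200, "buffer 4 jumps to CIOYSA5");
_Static_assert(EXYNOS_CIOYSA(31) == 0x26c, "buffer 31 maps to CIOYSA32");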
#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
#define EXYNOS_CIIMGEFF_FIN(x) ((x & 0x7) << 26)
#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
/*
* Bit definition part
*/
/* Source format register */
#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
/* ITU601 16bit only */
#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
/* ITU601 16bit only */
#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
/* Window offset register */
#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
/* Global control register */
#define EXYNOS_CIGCTRL_SWRST (1 << 31)
#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
/* Window offset2 register */
#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
#define EXYNOS_CIWDOFST_WINVEROFST2_MASK (0xfff << 16)
/* Target format register */
#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
/* Output DMA control register */
#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
/* Main scaler control register */
#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
/* Status register */
#define EXYNOS_CISTATUS_OVFIY (1 << 31)
#define EXYNOS_CISTATUS_OVFICB (1 << 30)
#define EXYNOS_CISTATUS_OVFICR (1 << 29)
#define EXYNOS_CISTATUS_VSYNC (1 << 28)
#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
#define EXYNOS_CISTATUS_OVRLB (1 << 18)
#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
/* Image capture enable register */
#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
/* Image effects register */
#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
/* Real input DMA size register */
#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
/* Input DMA control register */
#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
#define EXYNOS_MSCTRL_BURST_CNT (24)
#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
#define EXYNOS_MSCTRL_ENVID (1 << 0)
/* DMA parameter register */
#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
/* Gathering Extension register */
#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
/* FIMC Clock Source Select register */
#define EXYNOS_CLKSRC_HCLK (0 << 1)
#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
#define EXYNOS_CLKSRC_SCLK (1 << 1)
/* SYSREG for FIMC writeback */
#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
#define SYSREG_FIMD0WB_DEST_SHIFT 23
#endif /* EXYNOS_REGS_FIMC_H */
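
Most of the GET_* macros in this header decode read-only status fields rather than build register values. A minimal usage sketch, assuming an ioremap()ed register base; the regs parameter and function name are illustrative, not part of this header:

#include <linux/io.h>

/* Sketch: has the FIMC finished the current frame? */
static bool fimc_frame_done(void __iomem *regs)
{
	u32 status = readl(regs + EXYNOS_CISTATUS);

	return EXYNOS_CISTATUS_GET_FRAME_END(status) != 0;
}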

View File

@@ -0,0 +1,284 @@
/* linux/drivers/gpu/drm/exynos/regs-gsc.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Register definition file for Samsung G-Scaler driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef EXYNOS_REGS_GSC_H_
#define EXYNOS_REGS_GSC_H_
/* G-Scaler enable */
#define GSC_ENABLE 0x00
#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
#define GSC_ENABLE_NORM_MODE (0 << 7)
#define GSC_ENABLE_IPC_MODE (1 << 7)
#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
#define GSC_ENABLE_QOS_ENABLE (1 << 3)
#define GSC_ENABLE_OP_STATUS (1 << 2)
#define GSC_ENABLE_SFR_UPDATE (1 << 1)
#define GSC_ENABLE_ON (1 << 0)
/* G-Scaler S/W reset */
#define GSC_SW_RESET 0x04
#define GSC_SW_RESET_SRESET (1 << 0)
/* G-Scaler IRQ */
#define GSC_IRQ 0x08
#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
#define GSC_IRQ_OR_MASK (1 << 2)
#define GSC_IRQ_FRMDONE_MASK (1 << 1)
#define GSC_IRQ_ENABLE (1 << 0)
/* G-Scaler input control */
#define GSC_IN_CON 0x10
#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
#define GSC_IN_RB_SWAP_MASK (1 << 19)
#define GSC_IN_RB_SWAP (1 << 19)
#define GSC_IN_ROT_MASK (7 << 16)
#define GSC_IN_ROT_270 (7 << 16)
#define GSC_IN_ROT_90_YFLIP (6 << 16)
#define GSC_IN_ROT_90_XFLIP (5 << 16)
#define GSC_IN_ROT_90 (4 << 16)
#define GSC_IN_ROT_180 (3 << 16)
#define GSC_IN_ROT_YFLIP (2 << 16)
#define GSC_IN_ROT_XFLIP (1 << 16)
#define GSC_IN_RGB_TYPE_MASK (3 << 14)
#define GSC_IN_RGB_HD_WIDE (3 << 14)
#define GSC_IN_RGB_HD_NARROW (2 << 14)
#define GSC_IN_RGB_SD_WIDE (1 << 14)
#define GSC_IN_RGB_SD_NARROW (0 << 14)
#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
#define GSC_IN_FORMAT_MASK (7 << 8)
#define GSC_IN_XRGB8888 (0 << 8)
#define GSC_IN_RGB565 (1 << 8)
#define GSC_IN_YUV420_2P (2 << 8)
#define GSC_IN_YUV420_3P (3 << 8)
#define GSC_IN_YUV422_1P (4 << 8)
#define GSC_IN_YUV422_2P (5 << 8)
#define GSC_IN_YUV422_3P (6 << 8)
#define GSC_IN_TILE_TYPE_MASK (1 << 4)
#define GSC_IN_TILE_C_16x8 (0 << 4)
#define GSC_IN_TILE_C_16x16 (1 << 4)
#define GSC_IN_TILE_MODE (1 << 3)
#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
#define GSC_IN_LOCAL_CAM3 (3 << 1)
#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
#define GSC_IN_LOCAL_CAM1 (1 << 1)
#define GSC_IN_LOCAL_CAM0 (0 << 1)
#define GSC_IN_PATH_MASK (1 << 0)
#define GSC_IN_PATH_LOCAL (1 << 0)
#define GSC_IN_PATH_MEMORY (0 << 0)
/* G-Scaler source image size */
#define GSC_SRCIMG_SIZE 0x14
#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
/* G-Scaler source image offset */
#define GSC_SRCIMG_OFFSET 0x18
#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
/* G-Scaler cropped source image size */
#define GSC_CROPPED_SIZE 0x1C
#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
#define GSC_CROPPED_WIDTH(x) ((x) << 0)
/* G-Scaler output control */
#define GSC_OUT_CON 0x20
#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
#define GSC_OUT_RB_SWAP_MASK (1 << 12)
#define GSC_OUT_RB_SWAP (1 << 12)
#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
#define GSC_OUT_RGB_HD_NARROW (3 << 10)
#define GSC_OUT_RGB_HD_WIDE (2 << 10)
#define GSC_OUT_RGB_SD_NARROW (1 << 10)
#define GSC_OUT_RGB_SD_WIDE (0 << 10)
#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
#define GSC_OUT_FORMAT_MASK (7 << 4)
#define GSC_OUT_XRGB8888 (0 << 4)
#define GSC_OUT_RGB565 (1 << 4)
#define GSC_OUT_YUV420_2P (2 << 4)
#define GSC_OUT_YUV420_3P (3 << 4)
#define GSC_OUT_YUV422_1P (4 << 4)
#define GSC_OUT_YUV422_2P (5 << 4)
#define GSC_OUT_YUV444 (7 << 4)
#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
#define GSC_OUT_TILE_C_16x8 (0 << 2)
#define GSC_OUT_TILE_C_16x16 (1 << 2)
#define GSC_OUT_TILE_MODE (1 << 1)
#define GSC_OUT_PATH_MASK (1 << 0)
#define GSC_OUT_PATH_LOCAL (1 << 0)
#define GSC_OUT_PATH_MEMORY (0 << 0)
/* G-Scaler scaled destination image size */
#define GSC_SCALED_SIZE 0x24
#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
#define GSC_SCALED_HEIGHT(x) ((x) << 16)
#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
#define GSC_SCALED_WIDTH(x) ((x) << 0)
/* G-Scaler pre scale ratio */
#define GSC_PRE_SCALE_RATIO 0x28
#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
#define GSC_PRESC_V_RATIO_MASK (7 << 16)
#define GSC_PRESC_V_RATIO(x) ((x) << 16)
#define GSC_PRESC_H_RATIO_MASK (7 << 0)
#define GSC_PRESC_H_RATIO(x) ((x) << 0)
/* G-Scaler main scale horizontal ratio */
#define GSC_MAIN_H_RATIO 0x2C
#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
/* G-Scaler main scale vertical ratio */
#define GSC_MAIN_V_RATIO 0x30
#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
/* G-Scaler input chrominance stride */
#define GSC_IN_CHROM_STRIDE 0x3C
#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
/* G-Scaler destination image size */
#define GSC_DSTIMG_SIZE 0x40
#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
/* G-Scaler destination image offset */
#define GSC_DSTIMG_OFFSET 0x44
#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
/* G-Scaler output chrominance stride */
#define GSC_OUT_CHROM_STRIDE 0x48
#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
/* G-Scaler input y address mask */
#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
/* G-Scaler input y base address */
#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
/* G-Scaler input y base current address */
#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
/* G-Scaler input cb address mask */
#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
/* G-Scaler input cb base address */
#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
/* G-Scaler input cb base current address */
#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
/* G-Scaler input cr address mask */
#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
/* G-Scaler input cr base address */
#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
/* G-Scaler input cr base current address */
#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
/* G-Scaler input address mask */
#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
/* G-Scaler output y address mask */
#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
/* G-Scaler output y base address */
#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
/* G-Scaler output cb address mask */
#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
/* G-Scaler output cb base address */
#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
/* G-Scaler output cr address mask */
#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
/* G-Scaler output cr base address */
#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
/* G-Scaler output address mask */
#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
/* G-Scaler horizontal scaling filter */
#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
/* G-Scaler vertical scaling filter */
#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
/* G-Scaler BUS control */
#define GSC_BUSCON 0xA78
#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
/* G-Scaler V position */
#define GSC_VPOSITION 0xA7C
#define GSC_VPOS_F(x) ((x) << 0)
/* G-Scaler clock initial count */
#define GSC_CLK_INIT_COUNT 0xC00
#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
/* G-Scaler clock snoop count */
#define GSC_CLK_SNOOP_COUNT 0xC04
#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
/* SYSCON. GSCBLK_CFG */
#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
#define GSC_BLK_DISP1WB_DEST(x) (x << 10)
#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x))
#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x))
#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x))
#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
#endif /* EXYNOS_REGS_GSC_H_ */
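
The GSC_IN_BASE_ADDR_*(n) and GSC_OUT_BASE_ADDR_*(n) macros above index stride-4 register arrays, one slot per ping-pong buffer. A minimal sketch of programming the n-th input buffer, assuming an ioremap()ed base and DMA addresses obtained elsewhere (function and parameter names are illustrative):

#include <linux/io.h>

/* Sketch: point input ping-pong slot n at a YCbCr frame. */
static void gsc_set_in_buf(void __iomem *regs, int n, u32 y, u32 cb, u32 cr)
{
	writel(y, regs + GSC_IN_BASE_ADDR_Y(n));
	writel(cb, regs + GSC_IN_BASE_ADDR_CB(n));
	writel(cr, regs + GSC_IN_BASE_ADDR_CR(n));
}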

View File

@@ -176,6 +176,11 @@
 #define HDMI_PHY_CMU			HDMI_CTRL_BASE(0x007C)
 #define HDMI_CORE_RSTOUT		HDMI_CTRL_BASE(0x0080)
+
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN		(1 << 0)
 
 /* Video related registers */
 #define HDMI_YMAX			HDMI_CORE_BASE(0x0060)
 #define HDMI_YMIN			HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
 #define HDMI_AVI_HEADER1		HDMI_CORE_BASE(0x0714)
 #define HDMI_AVI_HEADER2		HDMI_CORE_BASE(0x0718)
 #define HDMI_AVI_CHECK_SUM		HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n-1))
 
 #define HDMI_AUI_CON			HDMI_CORE_BASE(0x0800)
 #define HDMI_AUI_HEADER0		HDMI_CORE_BASE(0x0810)
 #define HDMI_AUI_HEADER1		HDMI_CORE_BASE(0x0814)
 #define HDMI_AUI_HEADER2		HDMI_CORE_BASE(0x0818)
 #define HDMI_AUI_CHECK_SUM		HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n-1))
 
 #define HDMI_MPG_CON			HDMI_CORE_BASE(0x0900)
 #define HDMI_MPG_CHECK_SUM		HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
 #define HDMI_AN_SEED_2			HDMI_CORE_BASE(0x0E60)
 #define HDMI_AN_SEED_3			HDMI_CORE_BASE(0x0E64)
 
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT	(0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC	(1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID		(1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID	(1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN		(0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT	(0 << 0)
+
 /* HDCP related registers */
 #define HDMI_HDCP_SHA1(n)		HDMI_CORE_BASE(0x7000 + 4 * (n))
 #define HDMI_HDCP_KSV_LIST(n)		HDMI_CORE_BASE(0x7050 + 4 * (n))
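
The HDMI_AVI_BYTE()/HDMI_AUI_BYTE() change in this hunk is an off-by-one fix: infoframe data bytes are numbered from 1 (PB1, PB2, ...) in the CEA-861 spec, so the macros now take that 1-based byte number directly instead of a 0-based index. A standalone check of the new mapping (the identity HDMI_CORE_BASE() here is for illustration only; the real one offsets into the mapped register space):

#define HDMI_CORE_BASE(x)	(x)
#define HDMI_AVI_BYTE(n)	HDMI_CORE_BASE(0x0720 + 4 * (n-1))

_Static_assert(HDMI_AVI_BYTE(1) == 0x0720, "PB1 lands on the first AVI byte register");
_Static_assert(HDMI_AVI_BYTE(13) == 0x0750, "PB13, the last AVI data byte, on the 13th");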

View File

@@ -0,0 +1,73 @@
/* drivers/gpu/drm/exynos/regs-rotator.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Register definition file for Samsung Rotator Interface (Rotator) driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef EXYNOS_REGS_ROTATOR_H
#define EXYNOS_REGS_ROTATOR_H
/* Configuration */
#define ROT_CONFIG 0x00
#define ROT_CONFIG_IRQ (3 << 8)
/* Image Control */
#define ROT_CONTROL 0x10
#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
#define ROT_CONTROL_FMT_RGB888 (6 << 8)
#define ROT_CONTROL_FMT_MASK (7 << 8)
#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
#define ROT_CONTROL_FLIP_MASK (3 << 6)
#define ROT_CONTROL_ROT_90 (1 << 4)
#define ROT_CONTROL_ROT_180 (2 << 4)
#define ROT_CONTROL_ROT_270 (3 << 4)
#define ROT_CONTROL_ROT_MASK (3 << 4)
#define ROT_CONTROL_START (1 << 0)
/* Status */
#define ROT_STATUS 0x20
#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
#define ROT_STATUS_IRQ_VAL_COMPLETE 1
#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
/* Buffer Address */
#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
/* Buffer Size */
#define ROT_SRC_BUF_SIZE 0x3c
#define ROT_DST_BUF_SIZE 0x5c
#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
/* Crop Position */
#define ROT_SRC_CROP_POS 0x40
#define ROT_DST_CROP_POS 0x60
#define ROT_CROP_POS_Y(x) ((x) << 16)
#define ROT_CROP_POS_X(x) ((x) << 0)
/* Source Crop Size */
#define ROT_SRC_CROP_SIZE 0x44
#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
/* Round to nearest aligned value */
#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
/* Minimum limit value */
#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
/* Maximum limit value */
#define ROT_MAX(max, mask) ((max) & (mask))
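
ROT_ALIGN() rounds to the nearest multiple of 1 << align by adding half of that step before masking, while ROT_MIN() rounds a lower limit up and ROT_MAX() rounds an upper limit down onto the same grid. A standalone check on a 4-pixel grid, i.e. align = 2 and mask = ~3 (illustrative only; C11 for _Static_assert):

#define ROT_ALIGN(x, align, mask)	(((x) + (1 << ((align) - 1))) & (mask))
#define ROT_MIN(min, mask)		(((min) + ~(mask)) & (mask))
#define ROT_MAX(max, mask)		((max) & (mask))

_Static_assert(ROT_ALIGN(5, 2, ~3) == 4, "5 rounds down to 4");
_Static_assert(ROT_ALIGN(6, 2, ~3) == 8, "6 rounds up to 8");
_Static_assert(ROT_MIN(5, ~3) == 8, "a minimum of 5 is raised to 8");
_Static_assert(ROT_MAX(7, ~3) == 4, "a maximum of 7 is lowered to 4");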
#endif /* EXYNOS_REGS_ROTATOR_H */

View File

@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
 		dev_priv->force_audio_property = prop;
 	}
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 		dev_priv->broadcast_rgb_property = prop;
 	}
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
 
 /* Cedarview */
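
This hunk and the remaining GMA500 ones are the mechanical side of the core change the merge summary calls the move from connector to object properties: the property helpers now operate on the drm_mode_object embedded in each connector, so every drm_connector_property_*() call becomes the matching drm_object_property_*() call on &connector->base, with the property and value arguments unchanged. The shape of the rewrite, as a sketch (the helper name is illustrative):

#include <drm/drm_crtc.h>

static void attach_prop(struct drm_connector *connector,
			struct drm_property *prop, uint64_t val)
{
	/* was: drm_connector_attach_property(connector, prop, val); */
	drm_object_attach_property(&connector->base, prop, val);
}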

View File

@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
 	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;

View File

@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
 			return -1;
 		}
 
-		if (drm_connector_property_get_value(connector,
+		if (drm_object_property_get_value(&connector->base,
 							property, &curValue))
 			return -1;
 
 		if (curValue == value)
 			return 0;
 
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 							property, value))
 			return -1;
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 		dev->mode_config.scaling_mode_property,
 		DRM_MODE_SCALE_FULLSCREEN);

View File

@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 			return -1;
 		}
 
-		if (drm_connector_property_get_value(connector,
+		if (drm_object_property_get_value(&connector->base,
 						property,
 						&curValue))
 			return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 		if (curValue == value)
 			return 0;
 
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 						property,
 						value))
 			return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
 			return -1;
 		}
 	} else if (!strcmp(property->name, "backlight") && encoder) {
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 						property,
 						value))
 			return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 	connector->doublescan_allowed = false;
 
 	/*Attach connector properties*/
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				dev->mode_config.scaling_mode_property,
 				DRM_MODE_SCALE_FULLSCREEN);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				dev_priv->backlight_property,
 				BRIGHTNESS_MAX_LEVEL);

View File

@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 			goto set_prop_error;
 		}
 
-		if (drm_connector_property_get_value(connector, property, &val))
+		if (drm_object_property_get_value(&connector->base, property, &val))
 			goto set_prop_error;
 
 		if (val == value)
 			goto set_prop_done;
 
-		if (drm_connector_property_set_value(connector,
+		if (drm_object_property_set_value(&connector->base,
 					property, value))
 			goto set_prop_error;
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 			}
 		}
 	} else if (!strcmp(property->name, "backlight") && encoder) {
-		if (drm_connector_property_set_value(connector, property,
+		if (drm_object_property_set_value(&connector->base, property,
 					value))
 			goto set_prop_error;
 		else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
 	dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
 
-	if (!dev || ((pipe != 0) && (pipe != 2))) {
+	if (pipe != 0 && pipe != 2) {
 		DRM_ERROR("Invalid parameter\n");
 		return;
 	}
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
 	connector->doublescan_allowed = false;
 
 	/*attach properties*/
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				dev->mode_config.scaling_mode_property,
 				DRM_MODE_SCALE_FULLSCREEN);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				dev_priv->backlight_property,
 				MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);

View File

@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
 	REG_WRITE(map->pos, 0);
 
 	if (psb_intel_encoder)
-		drm_connector_property_get_value(connector,
+		drm_object_property_get_value(&connector->base,
 			dev->mode_config.scaling_mode_property, &scalingType);
 
 	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {

View File

@@ -249,3 +249,9 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
 extern void oaktrail_hdmi_save(struct drm_device *dev);
 extern void oaktrail_hdmi_restore(struct drm_device *dev);
 extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+						struct drm_display_mode *adjusted_mode, int x, int y,
+						struct drm_framebuffer *old_fb);
+extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);

View File

@@ -168,6 +168,11 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
 	const struct psb_offset *map = &dev_priv->regmap[pipe];
 	u32 temp;
 
+	if (pipe == 1) {
+		oaktrail_crtc_hdmi_dpms(crtc, mode);
+		return;
+	}
+
 	if (!gma_power_begin(dev, true))
 		return;
@@ -302,6 +307,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
 	struct drm_connector *connector;
 
+	if (pipe == 1)
+		return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
 	if (!gma_power_begin(dev, true))
 		return 0;
@@ -343,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
 					(mode->crtc_vdisplay - 1));
 
 	if (psb_intel_encoder)
-		drm_connector_property_get_value(connector,
+		drm_object_property_get_value(&connector->base,
 			dev->mode_config.scaling_mode_property, &scalingType);
 
 	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {

View File

@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
 	.accel_2d = 1,
 	.pipes = 2,
 	.crtcs = 2,
-	.hdmi_mask = (1 << 0),
+	.hdmi_mask = (1 << 1),
 	.lvds_mask = (1 << 0),
 	.cursor_needs_phys = 0,
 	.sgx_offset = MRST_SGX_OFFSET,

View File

@@ -155,6 +155,345 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
 	HDMI_READ(HDMI_HCR);
 }
static void wait_for_vblank(struct drm_device *dev)
{
/* Wait for 20 ms, i.e. one cycle at 50 Hz. */
mdelay(20);
}
static unsigned int htotal_calculate(struct drm_display_mode *mode)
{
u32 htotal, new_crtc_htotal;
htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
/*
* 1024 x 768 new_crtc_htotal = 0x1024;
* 1280 x 1024 new_crtc_htotal = 0x0c34;
*/
new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
}
static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
int refclk, struct oaktrail_hdmi_clock *best_clock)
{
int np_min, np_max, nr_min, nr_max;
int np, nr, nf;
np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
if (np_min < oaktrail_hdmi_limit.np.min)
np_min = oaktrail_hdmi_limit.np.min;
if (np_max > oaktrail_hdmi_limit.np.max)
np_max = oaktrail_hdmi_limit.np.max;
nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
if (nr_min < oaktrail_hdmi_limit.nr.min)
nr_min = oaktrail_hdmi_limit.nr.min;
if (nr_max > oaktrail_hdmi_limit.nr.max)
nr_max = oaktrail_hdmi_limit.nr.max;
np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
/*
* 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
* 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
*/
best_clock->np = np;
best_clock->nr = nr - 1;
best_clock->nf = (nf << 14);
}
static void scu_busy_loop(void __iomem *scu_base)
{
u32 status = 0;
u32 loop_count = 0;
status = readl(scu_base + 0x04);
while (status & 1) {
udelay(1); /* SCU processing time is a few microseconds */
status = readl(scu_base + 0x04);
loop_count++;
/* break if scu doesn't reset busy bit after huge retry */
if (loop_count > 1000) {
DRM_DEBUG_KMS("SCU IPC timed out");
return;
}
}
}
/*
* You don't want to know, you really really don't want to know....
*
* This is magic. However it's safe magic because of the way the platform
* works and it is necessary magic.
*/
static void oaktrail_hdmi_reset(struct drm_device *dev)
{
void __iomem *base;
unsigned long scu_ipc_mmio = 0xff11c000UL;
int scu_len = 1024;
base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
if (base == NULL) {
DRM_ERROR("failed to map scu mmio\n");
return;
}
/* scu ipc: assert hdmi controller reset */
writel(0xff11d118, base + 0x0c);
writel(0x7fffffdf, base + 0x80);
writel(0x42005, base + 0x0);
scu_busy_loop(base);
/* scu ipc: de-assert hdmi controller reset */
writel(0xff11d118, base + 0x0c);
writel(0x7fffffff, base + 0x80);
writel(0x42005, base + 0x0);
scu_busy_loop(base);
iounmap(base);
}
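/*
 * Illustrative note, not part of the original patch: the sequence above
 * looks like an indirect register write over the SCU IPC mailbox --
 * target address at base + 0x0c, value at base + 0x80, command word
 * 0x42005 at base + 0x00 -- with scu_busy_loop() then polling the
 * status register at base + 0x04 until the busy bit clears.
 */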
int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
int pipe = 1;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
int refclk;
struct oaktrail_hdmi_clock clock;
u32 dspcntr, pipeconf, dpll, temp;
int dspcntr_reg = DSPBCNTR;
if (!gma_power_begin(dev, true))
return 0;
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable dpll if necessary */
dpll = REG_READ(DPLL_CTRL);
if ((dpll & DPLL_PWRDN) == 0) {
REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
REG_WRITE(DPLL_STATUS, 0x1);
}
udelay(150);
/* Reset controller */
oaktrail_hdmi_reset(dev);
/* program and enable dpll */
refclk = 25000;
oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
/* Set the DPLL */
dpll = REG_READ(DPLL_CTRL);
dpll &= ~DPLL_PDIV_MASK;
dpll &= ~(DPLL_PWRDN | DPLL_RESET);
REG_WRITE(DPLL_CTRL, 0x00000008);
REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
REG_WRITE(DPLL_UPDATE, 0x80000000);
REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
udelay(150);
/* configure HDMI */
HDMI_WRITE(0x1004, 0x1fd);
HDMI_WRITE(0x2000, 0x1);
HDMI_WRITE(0x2008, 0x0);
HDMI_WRITE(0x3130, 0x8);
HDMI_WRITE(0x101c, 0x1800810);
temp = htotal_calculate(adjusted_mode);
REG_WRITE(htot_reg, temp);
REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
REG_WRITE(dsppos_reg, 0);
/* Flush the plane changes */
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
/* Set up the display plane register */
dspcntr = REG_READ(dspcntr_reg);
dspcntr |= DISPPLANE_GAMMA_ENABLE;
dspcntr |= DISPPLANE_SEL_PIPE_B;
dspcntr |= DISPLAY_PLANE_ENABLE;
/* setup pipeconf */
pipeconf = REG_READ(pipeconf_reg);
pipeconf |= PIPEACONF_ENABLE;
REG_WRITE(pipeconf_reg, pipeconf);
REG_READ(pipeconf_reg);
REG_WRITE(PCH_PIPEBCONF, pipeconf);
REG_READ(PCH_PIPEBCONF);
wait_for_vblank(dev);
REG_WRITE(dspcntr_reg, dspcntr);
wait_for_vblank(dev);
gma_power_end(dev);
return 0;
}
void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
u32 temp;
DRM_DEBUG_KMS("%s %d\n", __func__, mode);
switch (mode) {
case DRM_MODE_DPMS_OFF:
REG_WRITE(VGACNTRL, 0x80000000);
/* Disable plane */
temp = REG_READ(DSPBCNTR);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
REG_READ(DSPBCNTR);
/* Flush the plane changes */
REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
REG_READ(DSPBSURF);
}
/* Disable pipe B */
temp = REG_READ(PIPEBCONF);
if ((temp & PIPEACONF_ENABLE) != 0) {
REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
REG_READ(PIPEBCONF);
}
/* Disable LNW Pipes, etc */
temp = REG_READ(PCH_PIPEBCONF);
if ((temp & PIPEACONF_ENABLE) != 0) {
REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
REG_READ(PCH_PIPEBCONF);
}
/* wait for pipe off */
udelay(150);
/* Disable dpll */
temp = REG_READ(DPLL_CTRL);
if ((temp & DPLL_PWRDN) == 0) {
REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
REG_WRITE(DPLL_STATUS, 0x1);
}
/* wait for dpll off */
udelay(150);
break;
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable dpll */
temp = REG_READ(DPLL_CTRL);
if ((temp & DPLL_PWRDN) != 0) {
REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
temp = REG_READ(DPLL_CLK_ENABLE);
REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
REG_READ(DPLL_CLK_ENABLE);
}
/* wait for dpll warm up */
udelay(150);
/* Enable pipe B */
temp = REG_READ(PIPEBCONF);
if ((temp & PIPEACONF_ENABLE) == 0) {
REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
REG_READ(PIPEBCONF);
}
/* Enable LNW Pipe B */
temp = REG_READ(PCH_PIPEBCONF);
if ((temp & PIPEACONF_ENABLE) == 0) {
REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
REG_READ(PCH_PIPEBCONF);
}
wait_for_vblank(dev);
/* Enable plane */
temp = REG_READ(DSPBCNTR);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
REG_READ(DSPBSURF);
}
psb_intel_crtc_load_lut(crtc);
}
/* DSPARB */
REG_WRITE(DSPARB, 0x00003fbf);
/* FW1 */
REG_WRITE(0x70034, 0x3f880a0a);
/* FW2 */
REG_WRITE(0x70038, 0x0b060808);
/* FW4 */
REG_WRITE(0x70050, 0x08030404);
/* FW5 */
REG_WRITE(0x70054, 0x04040404);
/* LNC Chicken Bits - Squawk! */
REG_WRITE(0x70400, 0x4000);
return;
}
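/*
 * Illustrative note, not part of the original patch: the DSPARB and
 * FW1/FW2/FW4/FW5 writes above appear to program the display FIFO
 * arbitration split and watermarks with fixed known-good values for
 * this platform rather than computing them per mode.
 */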
static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
static int dpms_mode = -1;
@@ -233,13 +572,15 @@ static const unsigned char raw_edid[] = {
static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
{
-struct drm_device *dev = connector->dev;
-struct drm_psb_private *dev_priv = dev->dev_private;
struct i2c_adapter *i2c_adap;
struct edid *edid;
-struct drm_display_mode *mode, *t;
-int i = 0, ret = 0;
+int ret = 0;
/*
 * FIXME: We need to figure this lot out. In theory we can
 * read the EDID somehow but I've yet to find working reference
 * code.
 */
i2c_adap = i2c_get_adapter(3);
if (i2c_adap == NULL) {
DRM_ERROR("No ddc adapter available!\n");
@@ -253,17 +594,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
}
-/*
- * prune modes that require frame buffer bigger than stolen mem
- */
-list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
-if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
-i++;
-drm_mode_remove(connector, mode);
-}
-}
-return ret - i;
+return ret;
}
static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -349,6 +680,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_sysfs_connector_add(connector);
dev_info(dev->dev, "HDMI initialised.\n");
return;
@@ -403,6 +735,9 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
dev_priv->hdmi_priv = hdmi_dev;
oaktrail_hdmi_audio_disable(dev);
dev_info(dev->dev, "HDMI hardware present.\n");
return;
free:

drivers/gpu/drm/gma500/oaktrail_lvds.c

@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
return;
}
-drm_connector_property_get_value(
-connector,
+drm_object_property_get_value(
+&connector->base,
dev->mode_config.scaling_mode_property,
&v);
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);

drivers/gpu/drm/gma500/psb_intel_lvds.c

@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
-if (drm_connector_property_get_value(connector,
+if (drm_object_property_get_value(&connector->base,
property,
&curval))
goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
if (curval == value)
goto set_prop_done;
-if (drm_connector_property_set_value(connector,
+if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
goto set_prop_error;
}
} else if (!strcmp(property->name, "backlight")) {
-if (drm_connector_property_set_value(connector,
+if (drm_object_property_set_value(&connector->base,
property,
value))
goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
connector->doublescan_allowed = false;
/*Attach connector properties*/
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);

drivers/gpu/drm/gma500/psb_intel_sdvo.c

@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
-ret = drm_connector_property_set_value(connector, property, val);
+ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
temp_value = val;
if (psb_intel_sdvo_connector->left == property) {
-drm_connector_property_set_value(connector,
+drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->right, val);
if (psb_intel_sdvo_connector->left_margin == temp_value)
return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->right == property) {
-drm_connector_property_set_value(connector,
+drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->left, val);
if (psb_intel_sdvo_connector->right_margin == temp_value)
return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (psb_intel_sdvo_connector->top == property) {
-drm_connector_property_set_value(connector,
+drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->bottom, val);
if (psb_intel_sdvo_connector->top_margin == temp_value)
return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (psb_intel_sdvo_connector->bottom == property) {
-drm_connector_property_set_value(connector,
+drm_object_property_set_value(&connector->base,
psb_intel_sdvo_connector->top, val);
if (psb_intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
-drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
psb_intel_sdvo_connector->tv_format, 0);
return true;
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
psb_intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!psb_intel_sdvo_connector->name) return false; \
-drm_connector_attach_property(connector, \
+drm_object_attach_property(&connector->base, \
psb_intel_sdvo_connector->name, \
psb_intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->left)
return false;
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->left,
psb_intel_sdvo_connector->left_margin);
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->right)
return false;
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->right,
psb_intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->top)
return false;
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->top,
psb_intel_sdvo_connector->top_margin);
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->bottom)
return false;
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->bottom,
psb_intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
if (!psb_intel_sdvo_connector->dot_crawl)
return false;
-drm_connector_attach_property(connector,
+drm_object_attach_property(&connector->base,
psb_intel_sdvo_connector->dot_crawl,
psb_intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);

drivers/gpu/drm/i2c/ch7006_drv.c

@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
else
priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
-drm_connector_property_set_value(connector,
+drm_object_property_set_value(&connector->base,
encoder->dev->mode_config.tv_subconnector_property,
priv->subconnector);
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
-drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
priv->select_subconnector);
-drm_connector_attach_property(connector, conf->tv_subconnector_property,
+drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
priv->subconnector);
-drm_connector_attach_property(connector, conf->tv_left_margin_property,
+drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
priv->hmargin);
-drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
-drm_connector_attach_property(connector, conf->tv_mode_property,
+drm_object_attach_property(&connector->base, conf->tv_mode_property,
priv->norm);
-drm_connector_attach_property(connector, conf->tv_brightness_property,
+drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
-drm_connector_attach_property(connector, conf->tv_contrast_property,
+drm_object_attach_property(&connector->base, conf->tv_contrast_property,
priv->contrast);
-drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
priv->flicker);
-drm_connector_attach_property(connector, priv->scale_property,
+drm_object_attach_property(&connector->base, priv->scale_property,
priv->scale);
return 0;

drivers/gpu/drm/i915/i915_debugfs.c

@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
-if (!work->pending) {
+if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Stall check enabled, ");
else
seq_printf(m, "Stall check waiting for page flip ioctl, ");
-seq_printf(m, "%d prepares\n", work->pending);
+seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-seq_printf(m, " SYNC_0: 0x%08x\n",
-error->semaphore_mboxes[ring][0]);
-seq_printf(m, " SYNC_1: 0x%08x\n",
-error->semaphore_mboxes[ring][1]);
+seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+error->semaphore_mboxes[ring][0],
+error->semaphore_seqno[ring][0]);
+seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+error->semaphore_mboxes[ring][1],
+error->semaphore_seqno[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
@@ -1068,7 +1070,7 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
-u32 rpmodectl1, gt_core_status, rcctl1;
+u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
int count=0, ret;
@@ -1097,6 +1099,9 @@ static int gen6_drpc_info(struct seq_file *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
mutex_unlock(&dev_priv->rps.hw_lock);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1153,12 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "RC6++ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6pp));
seq_printf(m, "RC6 voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
seq_printf(m, "RC6+ voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
seq_printf(m, "RC6++ voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
}
@@ -1273,7 +1284,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
-ret = mutex_lock_interruptible(&dev->struct_mutex);
+ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1282,19 +1293,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
-I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
-I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
-GEN6_PCODE_READ_MIN_FREQ_TABLE);
-if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
-GEN6_PCODE_READY) == 0, 10)) {
-DRM_ERROR("pcode read of freq table timed out\n");
-continue;
-}
-ia_freq = I915_READ(GEN6_PCODE_DATA);
+ia_freq = gpu_freq;
+sandybridge_pcode_read(dev_priv,
+GEN6_PCODE_READ_MIN_FREQ_TABLE,
+&ia_freq);
seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
}
@@ -1398,15 +1404,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
-if (dev_priv->pwrctx) {
+if (dev_priv->ips.pwrctx) {
seq_printf(m, "power context ");
-describe_obj(m, dev_priv->pwrctx);
+describe_obj(m, dev_priv->ips.pwrctx);
seq_printf(m, "\n");
}
-if (dev_priv->renderctx) {
+if (dev_priv->ips.renderctx) {
seq_printf(m, "render context ");
-describe_obj(m, dev_priv->renderctx);
+describe_obj(m, dev_priv->ips.renderctx);
seq_printf(m, "\n");
}
@@ -1711,13 +1717,13 @@ i915_max_freq_read(struct file *filp,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
-ret = mutex_lock_interruptible(&dev->struct_mutex);
+ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1752,7 +1758,7 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
-ret = mutex_lock_interruptible(&dev->struct_mutex);
+ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1762,7 +1768,7 @@ i915_max_freq_write(struct file *filp,
dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}
@@ -1787,13 +1793,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
-ret = mutex_lock_interruptible(&dev->struct_mutex);
+ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1826,7 +1832,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
-ret = mutex_lock_interruptible(&dev->struct_mutex);
+ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1836,7 +1842,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}

drivers/gpu/drm/i915/i915_dma.c

@@ -103,32 +103,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
I915_WRITE(HWS_PGA, addr);
}
-/**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-static int i915_init_phys_hws(struct drm_device *dev)
-{
-drm_i915_private_t *dev_priv = dev->dev_private;
-/* Program Hardware Status Page */
-dev_priv->status_page_dmah =
-drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
-if (!dev_priv->status_page_dmah) {
-DRM_ERROR("Can not allocate hardware status page\n");
-return -ENOMEM;
-}
-memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
-0, PAGE_SIZE);
-i915_write_hws_pga(dev);
-DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-return 0;
-}
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
@@ -167,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-ring->space = ring->head - (ring->tail + 8);
+ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-dev_priv->counter++;
-if (dev_priv->counter > 0x7FFFFFFFUL)
-dev_priv->counter = 0;
+dev_priv->dri1.counter++;
+if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
-master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-OUT_RING(dev_priv->counter);
+OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
-master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-OUT_RING(dev_priv->counter);
+OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -618,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
static int i915_quiescent(struct drm_device *dev)
{
-struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
i915_kernel_lost_context(dev);
-return intel_wait_ring_idle(ring);
+return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -775,21 +747,21 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG_DRIVER("\n");
-dev_priv->counter++;
-if (dev_priv->counter > 0x7FFFFFFFUL)
-dev_priv->counter = 1;
+dev_priv->dri1.counter++;
+if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
-master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-OUT_RING(dev_priv->counter);
+OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
-return dev_priv->counter;
+return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +792,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
return ret;
@@ -1014,6 +986,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1068,7 +1043,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
-struct intel_ring_buffer *ring = LP_RING(dev_priv);
+struct intel_ring_buffer *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -1088,6 +1063,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
ring = LP_RING(dev_priv);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1326,6 +1302,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -1491,19 +1469,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
-ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
-if (!ret) {
-DRM_ERROR("failed to set up gmch\n");
-ret = -EIO;
+ret = i915_gem_gtt_init(dev);
+if (ret)
goto put_bridge;
-}
-dev_priv->mm.gtt = intel_gtt_get();
-if (!dev_priv->mm.gtt) {
-DRM_ERROR("Failed to initialize GTT\n");
-ret = -ENODEV;
-goto put_gmch;
-}
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@@ -1590,18 +1558,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
-/* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
i915_gem_load(dev);
-/* Init HWS */
-if (!I915_NEED_GFX_HWS(dev)) {
-ret = i915_init_phys_hws(dev);
-if (ret)
-goto out_gem_unload;
-}
/* On the 945G/GM, the chipset reports the MSI capability on the
 * integrated graphics even though the support isn't actually there
 * according to the published specs. It doesn't appear to function
@@ -1621,6 +1581,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1678,7 +1640,7 @@ out_mtrrfree:
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
-intel_gmch_remove();
+i915_gem_gtt_fini(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1721,6 +1683,7 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
cancel_work_sync(&dev_priv->console_resume_work);
/*
 * free the memory space allocated for the child device

drivers/gpu/drm/i915/i915_drv.c

@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
-"Override lid status (0=autodetect [default], 1=lid open, "
-"-1=lid closed)");
+"Override lid status (0=autodetect, 1=autodetect disabled [default], "
+"-1=force lid closed, -2=force lid open)");
unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,26 +410,36 @@
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-int id;
+unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
intel_modeset_disable(dev);
drm_irq_uninstall(dev);
@@ -526,24 +532,29 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
}
-static int i915_drm_thaw(struct drm_device *dev)
+void intel_console_resume(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
console_resume_work);
struct drm_device *dev = dev_priv->dev;
console_lock();
intel_fbdev_set_suspend(dev, 0);
console_unlock();
}
static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
-if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-mutex_lock(&dev->struct_mutex);
-i915_gem_restore_gtt_mappings(dev);
-mutex_unlock(&dev->struct_mutex);
-}
i915_restore_state(dev);
intel_opregion_setup(dev);
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-ironlake_init_pch_refclk(dev);
+intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
@@ -552,8 +563,7 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
-intel_modeset_setup_hw_state(dev);
-drm_mode_config_reset(dev);
+intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
}
@@ -561,14 +571,41 @@ static int i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
-console_lock();
-intel_fbdev_set_suspend(dev, 0);
-console_unlock();
+/*
+ * The console lock can be pretty contented on resume due
+ * to all the printk activity. Try to keep it out of the hot
+ * path of resume if possible.
+ */
+if (console_trylock()) {
+intel_fbdev_set_suspend(dev, 0);
+console_unlock();
+} else {
+schedule_work(&dev_priv->console_resume_work);
+}
+return error;
+}
+static int i915_drm_thaw(struct drm_device *dev)
+{
+int error = 0;
+intel_gt_reset(dev);
+if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+mutex_lock(&dev->struct_mutex);
+i915_gem_restore_gtt_mappings(dev);
+mutex_unlock(&dev->struct_mutex);
+}
+__i915_drm_thaw(dev);
return error;
}
int i915_resume(struct drm_device *dev)
{
+struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -579,7 +616,20 @@
pci_set_master(dev->pdev);
-ret = i915_drm_thaw(dev);
+intel_gt_reset(dev);
+/*
+ * Platforms with opregion should have sane BIOS, older ones (gen3 and
+ * earlier) need this since the BIOS might clear all our scratch PTEs.
+ */
+if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+!dev_priv->opregion.header) {
+mutex_lock(&dev->struct_mutex);
+i915_gem_restore_gtt_mappings(dev);
+mutex_unlock(&dev->struct_mutex);
+}
+ret = __i915_drm_thaw(dev);
if (ret)
return ret;
@@ -833,7 +883,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
-if (intel_info->is_haswell || intel_info->is_valleyview)
+if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
@@ -1140,12 +1190,40 @@ static bool IS_DISPLAYREG(u32 reg)
if (reg == GEN6_GDRST)
return false;
switch (reg) {
case _3D_CHICKEN3:
case IVB_CHICKEN3:
case GEN7_COMMON_SLICE_CHICKEN1:
case GEN7_L3CNTLREG1:
case GEN7_L3_CHICKEN_MODE_REGISTER:
case GEN7_ROW_CHICKEN2:
case GEN7_L3SQCREG4:
case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
case GEN7_HALF_SLICE_CHICKEN1:
case GEN6_MBCTL:
case GEN6_UCGCTL2:
return false;
default:
break;
}
return true;
}
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
* chip from rc6 before touching it for real. MI_MODE is masked, hence
* harmless to write 0 into. */
I915_WRITE_NOTRACE(MI_MODE, 0);
}
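/*
 * Illustrative note, not part of the original patch: MI_MODE is a
 * "masked" register -- the high 16 bits select which of the low 16 bits
 * a write may change -- so writing 0 requests no bit changes at all,
 * which is what makes the dummy wake-up write harmless.
 */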
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1255,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \

drivers/gpu/drm/i915/i915_drv.h

@@ -58,6 +58,14 @@ enum pipe {
};
#define pipe_name(p) ((p) + 'A')
enum transcoder {
TRANSCODER_A = 0,
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -93,6 +101,12 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
int wrpll2_refcount;
};
/* Interface history:
 *
 * 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
struct drm_i915_gem_object *cur_obj;
};
-struct mem_block {
-struct mem_block *next;
-struct mem_block *prev;
-int start;
-int size;
-struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -191,6 +197,7 @@ struct drm_i915_error_state {
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
@@ -251,6 +258,7 @@ struct drm_i915_display_funcs {
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -263,7 +271,6 @@
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
-void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
@@ -338,6 +345,7 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
struct drm_device *dev;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
@@ -374,6 +382,11 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint PCH */
};
enum intel_sbi_destination {
SBI_ICLK,
SBI_MPHY,
};
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -383,154 +396,18 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
-bool force_bit;
+u32 force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
-typedef struct drm_i915_private {
-struct drm_device *dev;
-const struct intel_device_info *info;
-int relative_constants_mode;
-void __iomem *regs;
-struct drm_i915_gt_funcs gt;
-/** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
-unsigned gt_fifo_count;
-/** forcewake_count is protected by gt_lock */
-unsigned forcewake_count;
-/** gt_lock is also taken in irq contexts. */
-struct spinlock gt_lock;
-struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
-/** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
-struct mutex gmbus_mutex;
-/**
- * Base address of the gmbus and gpio block.
- */
-uint32_t gpio_mmio_base;
-struct pci_dev *bridge_dev;
-struct intel_ring_buffer ring[I915_NUM_RINGS];
-uint32_t next_seqno;
-drm_dma_handle_t *status_page_dmah;
-uint32_t counter;
-struct drm_i915_gem_object *pwrctx;
-struct drm_i915_gem_object *renderctx;
-struct resource mch_res;
-atomic_t irq_received;
-/* protects the irq masks */
-spinlock_t irq_lock;
-/* DPIO indirect register protection */
-spinlock_t dpio_lock;
-/** Cached value of IMR to avoid reads in updating the bitfield */
-u32 pipestat[2];
-u32 irq_mask;
-u32 gt_irq_mask;
-u32 pch_irq_mask;
-u32 hotplug_supported_mask;
-struct work_struct hotplug_work;
-int num_pipe;
-int num_pch_pll;
-/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-struct timer_list hangcheck_timer;
-int hangcheck_count;
-uint32_t last_acthd[I915_NUM_RINGS];
-uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-unsigned int stop_rings;
-unsigned long cfb_size;
-unsigned int cfb_fb;
-enum plane cfb_plane;
-int cfb_y;
-struct intel_fbc_work *fbc_work;
-struct intel_opregion opregion;
-/* overlay */
-struct intel_overlay *overlay;
-bool sprite_scaling_enabled;
-/* LVDS info */
-int backlight_level; /* restore backlight to this value */
-bool backlight_enabled;
-struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-/* Feature bits from the VBIOS */
-unsigned int int_tv_support:1;
-unsigned int lvds_dither:1;
-unsigned int lvds_vbt:1;
-unsigned int int_crt_support:1;
-unsigned int lvds_use_ssc:1;
-unsigned int display_clock_mode:1;
-int lvds_ssc_freq;
-unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-unsigned int lvds_val; /* used for checking LVDS channel mode */
-struct {
-int rate;
-int lanes;
-int preemphasis;
-int vswing;
-bool initialized;
-bool support;
-int bpp;
-struct edp_power_seq pps;
-} edp;
-bool no_aux_handshake;
-struct notifier_block lid_notifier;
-int crt_ddc_pin;
-struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
-int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
-int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-unsigned int fsb_freq, mem_freq, is_ddr3;
-spinlock_t error_lock;
-/* Protected by dev->error_lock. */
-struct drm_i915_error_state *first_error;
-struct work_struct error_work;
-struct completion error_completion;
-struct workqueue_struct *wq;
-/* Display functions */
-struct drm_i915_display_funcs display;
-/* PCH chipset type */
-enum intel_pch pch_type;
-unsigned long quirks;
-/* Register state */
-bool modeset_on_lid;
+struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
-u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -676,10 +553,206 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
};
struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protects the work_struct and
 * pm_iir. */
spinlock_t lock;
/* The below variables and all the rps hw state are protected by
 * dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
struct delayed_work delayed_resume_work;
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
*/
struct mutex hw_lock;
};
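/*
 * Illustrative note, not part of the original patch: per the comments
 * above, the documented nesting order when both locks are needed is
 *
 *	mutex_lock(&dev->struct_mutex);
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	... touch RPS/RC6 state, talk to the PCU ...
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *	mutex_unlock(&dev->struct_mutex);
 */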
struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 fmax;
u8 fstart;
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
u8 corr;
int c_m;
int r_t;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
};
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
uint32_t counter;
};
struct intel_l3_parity {
u32 *remap_info;
struct work_struct error_work;
};
typedef struct drm_i915_private {
struct drm_device *dev;
const struct intel_device_info *info;
int relative_constants_mode;
void __iomem *regs;
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
atomic_t irq_received;
/* protects the irq masks */
spinlock_t irq_lock;
/* DPIO indirect register protection */
spinlock_t dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
int num_pipe;
int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
struct drm_i915_display_funcs display;
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
unsigned long quirks;
/* Register state */
bool modeset_on_lid;
struct {
/** Bridge to intel-gtt-ko */
const struct intel_gtt *gtt;
struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
@@ -706,8 +779,6 @@ typedef struct drm_i915_private {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
u32 *l3_remap_info;
struct shrinker inactive_shrinker;
/**
@@ -785,19 +856,6 @@ typedef struct drm_i915_private {
u32 object_count;
} mm;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
} dri1;
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +869,7 @@ typedef struct drm_i915_private {
wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
@@ -820,46 +879,17 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
/* gen6+ rps state */
struct {
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protectects the work_struct and
* pm_iir. */
spinlock_t lock;
/* The below variables an all the rps hw state are protected by
* dev->struct mutext. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
} rps;
struct intel_l3_parity l3_parity;
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
struct {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 fmax;
u8 fstart;
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
u8 corr;
int c_m;
int r_t;
} ips;
struct intel_ilk_power_mgmt ips;
enum no_fbc_reason no_fbc_reason;
@@ -871,14 +901,27 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
/*
* The console may be contended at resume, but we don't
* want it to block on it.
*/
struct work_struct console_resume_work;
struct backlight_device *backlight;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
struct work_struct parity_error_work;
bool hw_contexts_disabled;
uint32_t hw_context_size;
bool fdi_rx_polarity_reversed;
struct i915_suspend_saved_registers regfile;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1120,9 +1163,14 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
(dev)->pci_device == 0x0152 || \
(dev)->pci_device == 0x015a)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
/*
* The genX designation typically refers to the render engine, so render
@@ -1168,6 +1216,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1250,6 +1305,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1313,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1368,8 +1425,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1387,7 +1443,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
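The signed-subtraction trick above is what keeps seqno comparisons valid across the 32-bit wraparound this series starts handling; a quick worked check:

/* Worked check (illustrative values): seq1 has just wrapped past zero,
 * seq2 predates the wrap:
 *   seq1 = 0x00000002, seq2 = 0xfffffffe
 * A plain unsigned compare says seq1 < seq2; the signed cast does not:
 *   (int32_t)(0x00000002 - 0xfffffffe) = (int32_t)0x00000004 = 4 >= 0,
 * so seq1 is correctly treated as having passed seq2.
 */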
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1499,6 +1555,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_gtt_fini(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
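A hedged usage sketch of the new wrapper: i915_gem_clflush_object() and i915_gem_chipset_flush() are real and mirror the pwrite/pread callers converted later in this merge, but the wrapper function itself is illustrative:

static void example_finish_cpu_writes(struct drm_device *dev,
                                      struct drm_i915_gem_object *obj)
{
        i915_gem_clflush_object(obj);   /* push dirty CPU cachelines */
        i915_gem_chipset_flush(dev);    /* gen<6 only: GMCH write-buffer flush */
}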
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1595,11 +1659,12 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1628,6 +1693,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

View File

@@ -845,12 +845,12 @@ out:
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
}
}
if (needs_clflush_after)
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
return ret;
}
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Now bind it into the GTT if needed */
if (!obj->map_and_fenceable) {
ret = i915_gem_object_unbind(obj);
if (ret)
goto unlock;
}
if (!obj->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
if (ret)
goto unlock;
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto unlock;
}
if (!obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(obj, obj->cache_level);
ret = i915_gem_object_get_fence(obj);
if (ret)
goto unlock;
if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
ret = i915_gem_object_pin(obj, 0, true, false);
if (ret)
goto unlock;
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto unpin;
ret = i915_gem_object_get_fence(obj);
if (ret)
goto unpin;
obj->fault_mappable = true;
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unpin:
i915_gem_object_unpin(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -1707,10 +1696,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages_pin_count)
return -EBUSY;
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
list_del(&obj->gtt_list);
ops->put_pages(obj);
obj->pages = NULL;
list_del(&obj->gtt_list);
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -1868,11 +1861,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno)
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
obj->ring = ring;
@@ -1933,26 +1926,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
}
static u32
i915_gem_get_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno = dev_priv->next_seqno;
/* reserve 0 for non-seqno */
if (++dev_priv->next_seqno == 0)
dev_priv->next_seqno = 1;
return seqno;
}
u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
if (ring->outstanding_lazy_request == 0)
ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
return ring->outstanding_lazy_request;
}
static int
i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i, j;
/* The hardware uses various monotonic 32-bit counters, if we
* detect that they will wraparound we need to idle the GPU
* and reset those counters.
*/
ret = 0;
for_each_ring(ring, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ret |= ring->sync_seqno[j] != 0;
}
if (ret == 0)
return ret;
ret = i915_gpu_idle(dev);
if (ret)
return ret;
i915_gem_retire_requests(dev);
for_each_ring(ring, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ring->sync_seqno[j] = 0;
}
return 0;
}
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
int ret = i915_gem_handle_seqno_wrap(dev);
if (ret)
return ret;
dev_priv->next_seqno = 1;
}
*seqno = dev_priv->next_seqno++;
return 0;
}
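A sketch of the new calling convention: reserving a seqno can now fail, because a wrap may require idling the GPU first. Only i915_gem_get_seqno() comes from the patch; the wrapper is illustrative:

static int example_reserve_seqno(struct drm_device *dev, u32 *out)
{
        int ret = i915_gem_get_seqno(dev, out); /* may call i915_gpu_idle() */
        if (ret)
                return ret;     /* propagate instead of using a stale seqno */
        return 0;
}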
int
@@ -1963,7 +1984,6 @@ i915_add_request(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
u32 seqno;
int was_empty;
int ret;
@@ -1982,7 +2002,6 @@ i915_add_request(struct intel_ring_buffer *ring,
if (request == NULL)
return -ENOMEM;
seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
@@ -1991,15 +2010,13 @@ i915_add_request(struct intel_ring_buffer *ring,
*/
request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring, &seqno);
ret = ring->add_request(ring);
if (ret) {
kfree(request);
return ret;
}
trace_i915_gem_request_add(ring, seqno);
request->seqno = seqno;
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
@@ -2017,23 +2034,24 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
mod_timer(&dev_priv->hangcheck_timer,
jiffies +
msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
if (out_seqno)
*out_seqno = seqno;
*out_seqno = request->seqno;
return 0;
}
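Why round_jiffies_up(): it lets the hangcheck and retire timers expire on whole-second boundaries so they batch with other wakeups. A worked example, assuming HZ=1000 (values illustrative, ignoring the small per-CPU skew the helper applies):

/* jiffies = 10250, DRM_I915_HANGCHECK_JIFFIES = msecs_to_jiffies(1500) = 1500
 * round_jiffies_up(10250 + 1500) = round_jiffies_up(11750) -> 12000,
 * i.e. the timer fires at the next full second instead of at tick 11750. */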
@@ -2131,7 +2149,6 @@ void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
int i;
if (list_empty(&ring->request_list))
return;
@@ -2140,10 +2157,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
ring->sync_seqno[i] = 0;
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2218,7 +2231,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
return;
}
@@ -2236,7 +2250,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);
@@ -2386,7 +2401,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
ret = to->sync_to(to, from, seqno);
if (!ret)
from->sync_seqno[idx] = seqno;
/* We use last_read_seqno because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
from->sync_seqno[idx] = obj->last_read_seqno;
return ret;
}
@@ -2469,14 +2488,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
}
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
if (list_empty(&ring->active_list))
return 0;
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2489,7 +2500,7 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;
ret = i915_ring_idle(ring);
ret = intel_ring_idle(ring);
if (ret)
return ret;
}
@@ -2923,13 +2934,14 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (ret)
return ret;
i915_gem_object_pin_pages(obj);
search_free:
if (map_and_fenceable)
free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
@@ -2937,60 +2949,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space =
free_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
obj->gtt_space =
free_space =
drm_mm_get_block_generic(free_space,
size, alignment, obj->cache_level,
false);
}
if (obj->gtt_space == NULL) {
if (free_space == NULL) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
if (ret) {
i915_gem_object_unpin_pages(obj);
return ret;
}
goto search_free;
}
if (WARN_ON(!i915_gem_valid_gtt_space(dev,
obj->gtt_space,
free_space,
obj->cache_level))) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
i915_gem_object_unpin_pages(obj);
drm_mm_put_block(free_space);
return -EINVAL;
}
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
i915_gem_object_unpin_pages(obj);
drm_mm_put_block(free_space);
return ret;
}
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->gtt_offset = obj->gtt_space->start;
obj->gtt_space = free_space;
obj->gtt_offset = free_space->start;
fenceable =
obj->gtt_space->size == fence_size &&
free_space->size == fence_size &&
(obj->gtt_space->start & (fence_alignment - 1)) == 0;
(free_space->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable;
i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
@@ -3059,7 +3071,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
i915_gem_clflush_object(obj);
intel_gtt_chipset_flush();
i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3454,11 +3466,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (obj->gtt_space == NULL) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3832,7 +3849,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
if (!IS_IVYBRIDGE(dev))
return;
if (!dev_priv->mm.l3_remap_info)
if (!dev_priv->l3_parity.remap_info)
return;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3858,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
if (remap && !dev_priv->mm.l3_remap_info[i/4])
if (remap && !dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
}
/* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3893,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
void i915_gem_init_ppgtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
static bool
intel_enable_blt(struct drm_device *dev)
{
@@ -3960,7 +3915,7 @@ i915_gem_init_hw(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
if (!intel_enable_gtt())
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4295,7 +4250,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_cache_release(page);
}
}
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
@@ -4382,7 +4337,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
return -EFAULT;
}
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
return 0;
}
@@ -4407,6 +4362,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
return false;
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
return false;
#endif
}
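A sketch of the recursion this check is for: allocating backing pages can re-enter the shrinker on the same thread while struct_mutex is already held, and the owner test lets that nested call scan without deadlocking:

/* i915_gem_object_get_pages()           holds dev->struct_mutex
 *   shmem_read_mapping_page()           hits memory pressure
 *     shrink_slab()
 *       i915_gem_inactive_shrink()      trylock fails, but owner == current,
 *                                       so scan anyway and skip the unlock
 * (call chain is illustrative; see the shrinker changes just below) */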
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -4417,10 +4385,15 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
int cnt;
if (!mutex_trylock(&dev->struct_mutex))
return 0;
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return 0;
unlock = false;
}
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
@@ -4436,6 +4409,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
}

View File

@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
struct i915_hw_context *ctx;
int ret, id;
ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno);
i915_gem_object_move_to_active(from_obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be

View File

@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj->cache_level);
}
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_DEBUG("reloc with multiple write domains: "
@@ -672,7 +663,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
i915_gem_chipset_flush(ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -722,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
u32 seqno)
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -735,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring, seqno);
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = seqno;
obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj);
}
@@ -798,8 +788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
u32 flags;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@@ -811,6 +801,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (!file->is_master || !capable(CAP_SYS_ADMIN))
return -EPERM;
flags |= I915_DISPATCH_SECURE;
}
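The userspace view, as a hedged fragment (not a complete program; drmIoctl() is the usual libdrm wrapper): only a DRM master with CAP_SYS_ADMIN may set the new flag, everyone else gets -EPERM.

struct drm_i915_gem_execbuffer2 eb;
memset(&eb, 0, sizeof(eb));
/* ... fill in buffers_ptr, buffer_count, batch_len ... */
eb.flags = I915_EXEC_RENDER | I915_EXEC_SECURE;
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb))
        perror("secure batch rejected");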
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@@ -983,26 +981,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but let's be paranoid and do it
* unconditionally for now. */
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
ret = i915_gpu_idle(dev);
if (ret)
goto err;
i915_gem_retire_requests(dev);
BUG_ON(ring->sync_seqno[i]);
}
}
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1028,8 +1017,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@@ -1040,17 +1027,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len);
exec_start, exec_len,
flags);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:

View File

@@ -28,19 +28,67 @@
#include "i915_trace.h"
#include "intel_drv.h"
typedef uint32_t gtt_pte_t;
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
static inline gtt_pte_t pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_LLC_MLC:
/* Haswell doesn't set L3 this way */
if (IS_HASWELL(dev))
pte |= GEN6_PTE_CACHE_LLC;
else
pte |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(dev))
pte |= HSW_PTE_UNCACHED;
else
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
return pte;
}
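A worked encoding, with illustrative values, to make the address folding above concrete:

/* Page at dma address 0x2_34567000 mapped I915_CACHE_LLC on Sandybridge:
 *   GEN6_GTT_ADDR_ENCODE() keeps bits 31:12 and folds address bits 39:32
 *   into PTE bits 11:4:
 *     0x34567000 | ((0x234567000 >> 28) & 0xff0) = 0x34567020
 *   pte |= GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID   -> 0x34567025
 */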
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
uint32_t *pt_vaddr;
uint32_t scratch_pte;
gtt_pte_t *pt_vaddr;
gtt_pte_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return ret;
ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct sg_table *pages,
unsigned first_entry,
uint32_t pte_flags)
enum i915_cache_level cache_level)
{
uint32_t *pt_vaddr, pte;
gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
pte = GEN6_PTE_ADDR_ENCODE(page_addr);
pt_vaddr[j] = pte | pte_flags;
pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
cache_level);
/* grab the next page */
if (++m == segment_len) {
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
case I915_CACHE_LLC_MLC:
pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
i915_ppgtt_insert_sg_entries(ppgtt,
obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
pte_flags);
cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT);
}
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
{
switch (cache_level) {
case I915_CACHE_LLC_MLC:
if (INTEL_INFO(dev)->gen >= 6)
return AGP_USER_CACHED_MEMORY_LLC_MLC;
/* Older chipsets do not have this extra level of CPU
* cacheing, so fallthrough and request the PTE simply
* as cached.
*/
case I915_CACHE_LLC:
return AGP_USER_CACHED_MEMORY;
default:
case I915_CACHE_NONE:
return AGP_USER_MEMORY;
}
}
void i915_gem_init_ppgtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
} }
}
@@ -288,13 +360,40 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
static void i915_ggtt_clear_range(struct drm_device *dev,
unsigned first_entry,
unsigned num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
int i;
if (INTEL_INFO(dev)->gen < 6) {
intel_gtt_clear_range(first_entry, num_entries);
return;
}
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
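A design note on the function above: "clearing" redirects every PTE at the scratch page rather than writing invalid entries, so a stray GPU access after unbind lands in a harmless blank page instead of faulting the chip; setup_scratch_page() below allocates that page.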
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +401,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -318,21 +417,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
for_each_sg(st->sgl, sg, st->nents, unused) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
i++;
}
}
BUG_ON(i > max_entries);
BUG_ON(i != obj->base.size / PAGE_SIZE);
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR that they are potentially
* of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
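The ordering above in miniature (a summary of the same registers the code touches, not new API):

/* 1. iowrite32(pte, &gtt_entries[i])  - PTE stores through the WC mapping
 * 2. readl(&gtt_entries[i-1])         - posting read drains the WC buffer
 * 3. I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN)
 *    POSTING_READ(GFX_FLSH_CNTL_GEN6) - only then flush the TLBs, so the
 *                                       GPU re-reads fully visible PTEs */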
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
intel_gtt_insert_sg_entries(obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
agp_type);
if (INTEL_INFO(dev)->gen < 6) {
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
flags);
} else {
gen6_ggtt_bind_object(obj, cache_level);
}
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
i915_ggtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
@@ -390,5 +544,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* ... but ensure that we clear the entire range. */
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct page *page;
dma_addr_t dma_addr;
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
get_page(page);
set_pages_uc(page, 1);
#ifdef CONFIG_INTEL_IOMMU
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, dma_addr))
return -EINVAL;
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->mm.gtt->scratch_page = page;
dev_priv->mm.gtt->scratch_page_dma = dma_addr;
return 0;
}
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(dev_priv->mm.gtt->scratch_page);
__free_page(dev_priv->mm.gtt->scratch_page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
return snb_gmch_ctl << 20;
}
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
return stolen_decoder[snb_gmch_ctl] << 20;
}
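Worked decodings of the GMCH control fields, with illustrative register values:

/* GGMS field 2  -> gen6_get_total_gtt_size() = 2 << 20 = 2 MB of GTT,
 *                  i.e. 512K four-byte PTEs mapping 2 GB at 4 KB per page.
 * gen6 GMS 5    -> 5 << 25 = 160 MB of stolen memory (32 MB units).
 * gen7 GMS 9    -> stolen_decoder[9] = 256 -> 256 MB (lookup table above). */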
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
u16 snb_gmch_ctl;
int ret;
/* On modern platforms we need not worry ourself with the legacy
* hostbridge query stuff. Skip it entirely
*/
if (INTEL_INFO(dev)->gen < 6) {
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
}
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
intel_gmch_remove();
return -ENODEV;
}
return 0;
}
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
if (!dev_priv->mm.gtt)
return -ENOMEM;
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
#ifdef CONFIG_INTEL_IOMMU
dev_priv->mm.gtt->needs_dmar = 1;
#endif
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
/* i9xx_setup */
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
dev_priv->mm.gtt->gtt_total_entries =
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
if (INTEL_INFO(dev)->gen < 7)
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
else
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
/* 64/512MB is the current min/max we actually know of, but this is just a
* coarse sanity check.
*/
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
DRM_ERROR("Unknown GMADR entries (%d)\n",
dev_priv->mm.gtt->gtt_mappable_entries);
ret = -ENXIO;
goto err_out;
}
ret = setup_scratch_page(dev);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
goto err_out;
}
dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
if (!dev_priv->mm.gtt->gtt) {
DRM_ERROR("Failed to map the gtt page table\n");
teardown_scratch_page(dev);
ret = -ENOMEM;
goto err_out;
}
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
return 0;
err_out:
kfree(dev_priv->mm.gtt);
if (INTEL_INFO(dev)->gen < 6)
intel_gmch_remove();
return ret;
}
void i915_gem_gtt_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
iounmap(dev_priv->mm.gtt->gtt);
teardown_scratch_page(dev);
if (INTEL_INFO(dev)->gen < 6)
intel_gmch_remove();
kfree(dev_priv->mm.gtt);
} }


@@ -122,7 +122,10 @@ static int
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+pipe);
+
+return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
 }
 /* Called from drm generic code, passed a 'crtc', which
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 int vbl_start, vbl_end, htotal, vtotal;
 bool in_vbl = true;
 int ret = 0;
+enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+pipe);
 if (!i915_pipe_enabled(dev, pipe)) {
 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 }
 /* Get vtotal. */
-vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
 if (INTEL_INFO(dev)->gen >= 4) {
 /* No obvious pixelcount register. Only query vertical
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 */
 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
-htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
 *vpos = position / htotal;
 *hpos = position - (*vpos * htotal);
 }
 /* Query vblank area. */
-vbl = I915_READ(VBLANK(pipe));
+vbl = I915_READ(VBLANK(cpu_transcoder));
 /* Test position against vblank region. */
 vbl_start = vbl & 0x1fff;
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
 if (i915_enable_hangcheck) {
 dev_priv->hangcheck_count = 0;
 mod_timer(&dev_priv->hangcheck_timer,
-jiffies +
-msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
 }
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 return;
-mutex_lock(&dev_priv->dev->struct_mutex);
+mutex_lock(&dev_priv->rps.hw_lock);
 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
 new_delay = dev_priv->rps.cur_delay + 1;
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 gen6_set_rps(dev_priv->dev, new_delay);
 }
-mutex_unlock(&dev_priv->dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
 }
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 static void ivybridge_parity_work(struct work_struct *work)
 {
 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-parity_error_work);
+l3_parity.error_work);
 u32 error_status, row, bank, subbank;
 char *parity_event[5];
 uint32_t misccpctl;
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-queue_work(dev_priv->wq, &dev_priv->parity_error_work);
+queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
-static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
 struct drm_device *dev = (struct drm_device *) arg;
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 int pipe;
+if (pch_iir & SDE_HOTPLUG_MASK)
+queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
 if (pch_iir & SDE_AUDIO_POWER_MASK)
 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 int pipe;
+if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 I915_READ(FDI_RX_IIR(pipe)));
 }
-static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 {
 struct drm_device *dev = (struct drm_device *) arg;
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 if (de_iir & DE_PCH_EVENT_IVB) {
 u32 pch_iir = I915_READ(SDEIIR);
-if (pch_iir & SDE_HOTPLUG_MASK_CPT)
-queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 cpt_irq_handler(dev, pch_iir);
 /* clear PCH hotplug event before clear CPU irq */
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
 notify_ring(dev, &dev_priv->ring[VCS]);
 }
-static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
 struct drm_device *dev = (struct drm_device *) arg;
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 int ret = IRQ_NONE;
 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
-u32 hotplug_mask;
 atomic_inc(&dev_priv->irq_received);
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 (!IS_GEN6(dev) || pm_iir == 0))
 goto done;
-if (HAS_PCH_CPT(dev))
-hotplug_mask = SDE_HOTPLUG_MASK_CPT;
-else
-hotplug_mask = SDE_HOTPLUG_MASK;
-
 ret = IRQ_HANDLED;
 if (IS_GEN5(dev))
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 /* check event from PCH */
 if (de_iir & DE_PCH_EVENT) {
-if (pch_iir & hotplug_mask)
-queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 if (HAS_PCH_CPT(dev))
 cpt_irq_handler(dev, pch_iir);
 else
@@ -1120,6 +1120,8 @@ static void i915_record_ring_state(struct drm_device *dev,
 = I915_READ(RING_SYNC_0(ring->mmio_base));
 error->semaphore_mboxes[ring->id][1]
 = I915_READ(RING_SYNC_1(ring->mmio_base));
+error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
 }
 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1464,7 +1466,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 spin_lock_irqsave(&dev->event_lock, flags);
 work = intel_crtc->unpin_work;
-if (work == NULL || work->pending || !work->enable_stall_check) {
+if (work == NULL ||
+atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+!work->enable_stall_check) {
 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
 spin_unlock_irqrestore(&dev->event_lock, flags);
 return;
@@ -1751,7 +1755,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 repeat:
 /* Reset timer case chip hangs without another request being added */
 mod_timer(&dev_priv->hangcheck_timer,
-jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
 /* drm_dma.h hooks
@@ -1956,6 +1960,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 u32 enable_mask;
 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+u32 render_irqs;
 u16 msid;
 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1995,21 +2000,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 I915_WRITE(VLV_IIR, 0xffffffff);
 I915_WRITE(VLV_IIR, 0xffffffff);
-dev_priv->gt_irq_mask = ~0;
-
-I915_WRITE(GTIIR, I915_READ(GTIIR));
 I915_WRITE(GTIIR, I915_READ(GTIIR));
 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
-GT_GEN6_BLT_CS_ERROR_INTERRUPT |
-GT_GEN6_BLT_USER_INTERRUPT |
-GT_GEN6_BSD_USER_INTERRUPT |
-GT_GEN6_BSD_CS_ERROR_INTERRUPT |
-GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
-GT_PIPE_NOTIFY |
-GT_RENDER_CS_ERROR_INTERRUPT |
-GT_SYNC_STATUS |
-GT_USER_INTERRUPT);
+
+render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+GEN6_BLITTER_USER_INTERRUPT;
+I915_WRITE(GTIER, render_irqs);
 POSTING_READ(GTIER);
 /* ack & enable invalid PTE error interrupts */
@@ -2019,7 +2015,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 #endif
 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-#if 0 /* FIXME: check register definitions; some have moved */
 /* Note HDMI and DP share bits */
 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2027,15 +2022,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
 hotplug_en |= HDMID_HOTPLUG_INT_EN;
-if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
 hotplug_en |= CRT_HOTPLUG_INT_EN;
 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 }
-#endif
 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
@@ -2129,7 +2123,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 return 0;
 }
-static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
 struct drm_device *dev = (struct drm_device *) arg;
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2307,7 +2301,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
 return 0;
 }
-static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
 struct drm_device *dev = (struct drm_device *) arg;
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2545,7 +2539,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
 return 0;
 }
-static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
 struct drm_device *dev = (struct drm_device *) arg;
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2691,7 +2685,7 @@ void intel_irq_init(struct drm_device *dev)
 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
-INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
+INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 dev->driver->get_vblank_counter = i915_get_vblank_counter;
 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */


@@ -26,6 +26,7 @@
 #define _I915_REG_H_
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -40,6 +41,14 @@
 */
 #define INTEL_GMCH_CTRL 0x52
 #define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define IVB_GMCH_GMS_SHIFT 4
+#define IVB_GMCH_GMS_MASK 0xf
 /* PCI config space */
@@ -105,23 +114,6 @@
 #define GEN6_GRDOM_MEDIA (1 << 2)
 #define GEN6_GRDOM_BLT (1 << 3)
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-#define GEN6_PDE_VALID (1 << 0)
-#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define HSW_PTE_UNCACHED (0)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_CACHE_BITS (3 << 1)
-#define GEN6_PTE_GFDT (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
 #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
 #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
 #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -241,11 +233,18 @@
 */
 #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
 #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
-#define MI_INVALIDATE_TLB (1<<18)
-#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
+#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
+#define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
 #define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
 #define MI_BATCH_NON_SECURE (1)
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
 #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
 #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@@ -369,6 +368,7 @@
 #define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
 #define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
 #define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_PLL_REFCLK_SEL_MASK 3
 #define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
 #define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
 #define _DPIO_REFSFR_B 0x8034
@@ -384,6 +384,9 @@
 #define DPIO_FASTCLK_DISABLE 0x8100
+#define DPIO_DATA_CHANNEL1 0x8220
+#define DPIO_DATA_CHANNEL2 0x8420
+
 /*
 * Fence registers
 */
@@ -521,6 +524,7 @@
 */
 # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
 #define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
 #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
 #define MI_MODE 0x0209c
@@ -547,6 +551,8 @@
 #define IIR 0x020a4
 #define IMR 0x020a8
 #define ISR 0x020ac
+#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define GCFG_DIS (1<<8)
 #define VLV_IIR_RW 0x182084
 #define VLV_IER 0x1820a0
 #define VLV_IIR 0x1820a4
@@ -661,6 +667,7 @@
 #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
 #define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
 #define CM0_IZ_OPT_DISABLE (1<<6)
 #define CM0_ZR_OPT_DISABLE (1<<5)
 #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -670,6 +677,8 @@
 #define CM0_RC_OP_FLUSH_DISABLE (1<<0)
 #define BB_ADDR 0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL_EN (1<<0)
 #define ECOSKPD 0x021d0
 #define ECO_GATING_CX_ONLY (1<<3)
 #define ECO_FLIP_DONE (1<<0)
@@ -1559,14 +1568,14 @@
 #define _VSYNCSHIFT_B 0x61028
-#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 /* VGA port control */
 #define ADPA 0x61100
@@ -2641,6 +2650,7 @@
 #define PIPECONF_GAMMA (1<<24)
 #define PIPECONF_FORCE_BORDER (1<<25)
 #define PIPECONF_INTERLACE_MASK (7 << 21)
+#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
 /* Note that pre-gen3 does not support interlaced display directly. Panel
 * fitting must be disabled on pre-ilk for interlaced. */
 #define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2711,7 +2721,7 @@
 #define PIPE_12BPC (3 << 5)
 #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
 #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
 #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
 #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -2998,12 +3008,19 @@
 #define DISPPLANE_GAMMA_ENABLE (1<<30)
 #define DISPPLANE_GAMMA_DISABLE 0
 #define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_YUV422 (0x0<<26)
 #define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
+#define DISPPLANE_BGRA555 (0x3<<26)
+#define DISPPLANE_BGRX555 (0x4<<26)
+#define DISPPLANE_BGRX565 (0x5<<26)
+#define DISPPLANE_BGRX888 (0x6<<26)
+#define DISPPLANE_BGRA888 (0x7<<26)
+#define DISPPLANE_RGBX101010 (0x8<<26)
+#define DISPPLANE_RGBA101010 (0x9<<26)
+#define DISPPLANE_BGRX101010 (0xa<<26)
+#define DISPPLANE_RGBX161616 (0xc<<26)
+#define DISPPLANE_RGBX888 (0xe<<26)
+#define DISPPLANE_RGBA888 (0xf<<26)
 #define DISPPLANE_STEREO_ENABLE (1<<25)
 #define DISPPLANE_STEREO_DISABLE 0
 #define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -3024,6 +3041,8 @@
 #define _DSPASIZE 0x70190
 #define _DSPASURF 0x7019C /* 965+ only */
 #define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC
 #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
 #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3033,6 +3052,8 @@
 #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
 #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
 #define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK (0xfffff000)
@@ -3078,6 +3099,8 @@
 #define _DSPBSIZE 0x71190
 #define _DSPBSURF 0x7119C
 #define _DSPBTILEOFF 0x711A4
+#define _DSPBOFFSET 0x711A4
+#define _DSPBSURFLIVE 0x711AC
 /* Sprite A control */
 #define _DVSACNTR 0x72180
@@ -3143,6 +3166,7 @@
 #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
 #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
 #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
 #define _SPRA_CTL 0x70280
 #define SPRITE_ENABLE (1<<31)
@@ -3177,6 +3201,8 @@
 #define _SPRA_SURF 0x7029c
 #define _SPRA_KEYMAX 0x702a0
 #define _SPRA_TILEOFF 0x702a4
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
 #define _SPRA_SCALE 0x70304
 #define SPRITE_SCALE_ENABLE (1<<31)
 #define SPRITE_FILTER_MASK (3<<29)
@@ -3197,6 +3223,8 @@
 #define _SPRB_SURF 0x7129c
 #define _SPRB_KEYMAX 0x712a0
 #define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
 #define _SPRB_SCALE 0x71304
 #define _SPRB_GAMC 0x71400
@@ -3210,8 +3238,10 @@
 #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
 #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
 #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
 #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
 #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 /* VBIOS regs */
 #define VGACNTRL 0x71400
@@ -3246,12 +3276,6 @@
 #define DISPLAY_PORT_PLL_BIOS_1 0x46010
 #define DISPLAY_PORT_PLL_BIOS_2 0x46014
-#define PCH_DSPCLK_GATE_D 0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
-
 #define PCH_3DCGDIS0 0x46020
 # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
 # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3301,20 +3325,22 @@
 #define _PIPEB_LINK_M2 0x61048
 #define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
 /* CPU panel fitter */
 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
 #define _PFA_CTL_1 0x68080
 #define _PFB_CTL_1 0x68880
 #define PF_ENABLE (1<<31)
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
 #define PF_FILTER_MASK (3<<23)
 #define PF_FILTER_PROGRAMMED (0<<23)
 #define PF_FILTER_MED_3x3 (1<<23)
@@ -3423,15 +3449,13 @@
 #define ILK_HDCP_DISABLE (1<<25)
 #define ILK_eDP_A_DISABLE (1<<24)
 #define ILK_DESKTOP (1<<23)
-#define ILK_DSPCLK_GATE 0x42020
-#define IVB_VRHUNIT_CLK_GATE (1<<28)
-#define ILK_DPARB_CLK_GATE (1<<5)
-#define ILK_DPFD_CLK_GATE (1<<7)
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define ILK_CLK_FBC (1<<7)
-#define ILK_DPFC_DIS1 (1<<8)
-#define ILK_DPFC_DIS2 (1<<9)
+#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
 #define IVB_CHICKEN3 0x4200c
 # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@@ -3447,14 +3471,21 @@
 #define GEN7_L3CNTLREG1 0xB01C
 #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_L3AGDIS (1<<19)
 #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
 #define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+#define GEN7_L3SQCREG4 0xb034
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
 #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+#define HSW_FUSE_STRAP 0x42014
+#define HSW_CDCLK_LIMIT (1 << 24)
+
 /* PCH */
 /* south display engine interrupt: IBX */
@@ -3686,7 +3717,7 @@
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_CTL_A 0x60200
 #define VLV_VIDEO_DIP_DATA_A 0x60208
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
@@ -3795,18 +3826,26 @@
 #define TRANS_6BPC (2<<5)
 #define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
 #define _TRANSA_CHICKEN2 0xf0064
 #define _TRANSB_CHICKEN2 0xf1064
 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
 #define SOUTH_CHICKEN1 0xc2000
 #define FDIA_PHASE_SYNC_SHIFT_OVR 19
 #define FDIA_PHASE_SYNC_SHIFT_EN 18
 #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
 #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
 #define SOUTH_CHICKEN2 0xc2004
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
 #define _FDI_RXA_CHICKEN 0xc200c
 #define _FDI_RXB_CHICKEN 0xc2010
@@ -3816,6 +3855,7 @@
 #define SOUTH_DSPCLK_GATE_D 0xc2020
 #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
 /* CPU: FDI_TX */
 #define _FDI_TXA_CTL 0x60100
@@ -3877,6 +3917,7 @@
 #define FDI_FS_ERRC_ENABLE (1<<27)
 #define FDI_FE_ERRC_ENABLE (1<<26)
 #define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
 #define FDI_8BPC (0<<16)
 #define FDI_10BPC (1<<16)
 #define FDI_6BPC (2<<16)
@@ -3901,16 +3942,21 @@
 #define FDI_PORT_WIDTH_2X_LPT (1<<19)
 #define FDI_PORT_WIDTH_1X_LPT (0<<19)
 #define _FDI_RXA_MISC 0xf0010
 #define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
+#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
 #define _FDI_RXA_TUSIZE1 0xf0030
 #define _FDI_RXA_TUSIZE2 0xf0038
 #define _FDI_RXB_TUSIZE1 0xf1030
 #define _FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
 #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -4003,6 +4049,11 @@
 #define PANEL_LIGHT_ON_DELAY_SHIFT 0
 #define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
+#define PANEL_POWER_PORT_LVDS (0 << 30)
+#define PANEL_POWER_PORT_DP_A (1 << 30)
+#define PANEL_POWER_PORT_DP_C (2 << 30)
+#define PANEL_POWER_PORT_DP_D (3 << 30)
 #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
 #define PANEL_POWER_DOWN_DELAY_SHIFT 16
 #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4050,7 +4101,7 @@
 #define TRANS_DP_CTL_A 0xe0300
 #define TRANS_DP_CTL_B 0xe1300
 #define TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
 #define TRANS_DP_OUTPUT_ENABLE (1<<31)
 #define TRANS_DP_PORT_SEL_B (0<<29)
 #define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4108,6 +4159,8 @@
 #define FORCEWAKE_ACK_HSW 0x130044
 #define FORCEWAKE_ACK 0x130090
 #define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_KERNEL 0x1
+#define FORCEWAKE_USER 0x2
 #define FORCEWAKE_MT_ACK 0x130040
 #define ECOBUS 0xa180
 #define FORCEWAKE_MT_ENABLE (1<<5)
@@ -4220,6 +4273,10 @@
 #define GEN6_READ_OC_PARAMS 0xc
 #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
 #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
 #define GEN6_PCODE_DATA 0x138128
 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
@@ -4251,6 +4308,15 @@
 #define GEN7_L3LOG_BASE 0xB070
 #define GEN7_L3LOG_SIZE 0x80
+#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
+#define GEN7_MAX_PS_THREAD_DEP (8<<12)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN7_ROW_CHICKEN2 0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+#define DOP_CLOCK_GATING_DISABLE (1<<0)
+
 #define G4X_AUD_VID_DID 0x62020
 #define INTEL_AUDIO_DEVCL 0x808629FB
 #define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4380,33 +4446,39 @@
 #define HSW_PWR_WELL_CTL6 0x45414
 /* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A 0x60400
-#define PIPE_DDI_FUNC_CTL_B 0x61400
-#define PIPE_DDI_FUNC_CTL_C 0x62400
-#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
-PIPE_DDI_FUNC_CTL_B)
-#define PIPE_DDI_FUNC_ENABLE (1<<31)
+#define TRANS_DDI_FUNC_CTL_A 0x60400
+#define TRANS_DDI_FUNC_CTL_B 0x61400
+#define TRANS_DDI_FUNC_CTL_C 0x62400
+#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_ENABLE (1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (7<<28)
-#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
-#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
-#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
-#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
-#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
-#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
-#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
-#define PIPE_DDI_BPC_MASK (7<<20)
-#define PIPE_DDI_BPC_8 (0<<20)
-#define PIPE_DDI_BPC_10 (1<<20)
-#define PIPE_DDI_BPC_6 (2<<20)
-#define PIPE_DDI_BPC_12 (3<<20)
-#define PIPE_DDI_PVSYNC (1<<17)
-#define PIPE_DDI_PHSYNC (1<<16)
-#define PIPE_DDI_BFI_ENABLE (1<<4)
-#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
-#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
-#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
+#define TRANS_DDI_PORT_NONE (0<<28)
+#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
+#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
+#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
+#define TRANS_DDI_BPC_MASK (7<<20)
+#define TRANS_DDI_BPC_8 (0<<20)
+#define TRANS_DDI_BPC_10 (1<<20)
+#define TRANS_DDI_BPC_6 (2<<20)
+#define TRANS_DDI_BPC_12 (3<<20)
+#define TRANS_DDI_PVSYNC (1<<17)
+#define TRANS_DDI_PHSYNC (1<<16)
+#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
+#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
+#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
 /* DisplayPort Transport Control */
 #define DP_TP_CTL_A 0x64040
@@ -4420,12 +4492,16 @@
 #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
 #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
 #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
 #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
 /* DisplayPort Transport Status */
 #define DP_TP_STATUS_A 0x64044
 #define DP_TP_STATUS_B 0x64144
 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
 #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
 /* DDI Buffer Control */
@@ -4444,6 +4520,7 @@
 #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
 #define DDI_BUF_EMP_MASK (0xf<<24)
 #define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
 #define DDI_PORT_WIDTH_X1 (0<<1)
 #define DDI_PORT_WIDTH_X2 (1<<1)
 #define DDI_PORT_WIDTH_X4 (3<<1)
@@ -4460,6 +4537,10 @@
 #define SBI_ADDR 0xC6000
 #define SBI_DATA 0xC6004
 #define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
 #define SBI_CTL_OP_CRRD (0x6<<8)
 #define SBI_CTL_OP_CRWR (0x7<<8)
 #define SBI_RESPONSE_FAIL (0x1<<1)
@@ -4477,10 +4558,12 @@
 #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
 #define SBI_SSCCTL 0x020c
 #define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
 #define SBI_SSCCTL_DISABLE (1<<0)
 #define SBI_SSCAUXDIV6 0x0610
 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
 #define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE 0xC6020
@@ -4490,8 +4573,8 @@
 /* SPLL */
 #define SPLL_CTL 0x46020
 #define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SCC (1<<28)
-#define SPLL_PLL_NON_SCC (2<<28)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
 #define SPLL_PLL_FREQ_810MHz (0<<26)
 #define SPLL_PLL_FREQ_1350MHz (1<<26)
@@ -4500,7 +4583,7 @@
 #define WRPLL_CTL2 0x46060
 #define WRPLL_PLL_ENABLE (1<<31)
 #define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
 #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
 /* WRPLL divider programming */
 #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@@ -4517,21 +4600,36 @@
 #define PORT_CLK_SEL_SPLL (3<<29)
 #define PORT_CLK_SEL_WRPLL1 (4<<29)
 #define PORT_CLK_SEL_WRPLL2 (5<<29)
+#define PORT_CLK_SEL_NONE (7<<29)
-/* Pipe clock selection */
-#define PIPE_CLK_SEL_A 0x46140
-#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
-/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A 0x46140
+#define TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0<<29)
+#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
+
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+_TRANSB_MSA_MISC)
+#define TRANS_MSA_SYNC_CLK (1<<0)
+#define TRANS_MSA_6_BPC (0<<5)
+#define TRANS_MSA_8_BPC (1<<5)
+#define TRANS_MSA_10_BPC (2<<5)
+#define TRANS_MSA_12_BPC (3<<5)
+#define TRANS_MSA_16_BPC (4<<5)
 /* LCPLL Control */
 #define LCPLL_CTL 0x130040
 #define LCPLL_PLL_DISABLE (1<<31)
 #define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CLK_FREQ_MASK (3<<26)
+#define LCPLL_CLK_FREQ_450 (0<<26)
 #define LCPLL_CD_CLOCK_DISABLE (1<<25)
 #define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_CD_SOURCE_FCLK (1<<21)
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A 0x45270

File diff suppressed because it is too large.


@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
-if (!IS_IVYBRIDGE(dev))
+if (!HAS_L3_GPU_CACHE(dev))
 return -EPERM;
 if (offset % 4 != 0)
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 if (ret)
 return ret;
-if (!dev_priv->mm.l3_remap_info) {
+if (!dev_priv->l3_parity.remap_info) {
 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 if (!temp) {
 mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 * at this point it is left as a TODO.
 */
 if (temp)
-dev_priv->mm.l3_remap_info = temp;
+dev_priv->l3_parity.remap_info = temp;
-memcpy(dev_priv->mm.l3_remap_info + (offset/4),
+memcpy(dev_priv->l3_parity.remap_info + (offset/4),
 buf + (offset/4),
 count);
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 struct drm_i915_private *dev_priv = dev->dev_private;
 int ret;
-ret = i915_mutex_lock_interruptible(dev);
-if (ret)
-return ret;
-
+mutex_lock(&dev_priv->rps.hw_lock);
 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
 return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 struct drm_i915_private *dev_priv = dev->dev_private;
 int ret;
-ret = i915_mutex_lock_interruptible(dev);
-if (ret)
-return ret;
-
+mutex_lock(&dev_priv->rps.hw_lock);
 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
 return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 val /= GT_FREQUENCY_MULTIPLIER;
-ret = mutex_lock_interruptible(&dev->struct_mutex);
-if (ret)
-return ret;
+mutex_lock(&dev_priv->rps.hw_lock);
 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 hw_max = (rp_state_cap & 0xff);
 hw_min = ((rp_state_cap & 0xff0000) >> 16);
 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
 return -EINVAL;
 }
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 dev_priv->rps.max_delay = val;
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
 return count;
 }
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 struct drm_i915_private *dev_priv = dev->dev_private;
 int ret;
-ret = i915_mutex_lock_interruptible(dev);
-if (ret)
-return ret;
-
+mutex_lock(&dev_priv->rps.hw_lock);
 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
 return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 val /= GT_FREQUENCY_MULTIPLIER;
-ret = mutex_lock_interruptible(&dev->struct_mutex);
-if (ret)
-return ret;
+mutex_lock(&dev_priv->rps.hw_lock);
 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 hw_max = (rp_state_cap & 0xff);
 hw_min = ((rp_state_cap & 0xff0000) >> 16);
 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
 return -EINVAL;
 }
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 dev_priv->rps.min_delay = val;
-mutex_unlock(&dev->struct_mutex);
+mutex_unlock(&dev_priv->rps.hw_lock);
 return count;


@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
 );
 TRACE_EVENT(i915_gem_ring_dispatch,
-TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-TP_ARGS(ring, seqno),
+TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+TP_ARGS(ring, seqno, flags),
 TP_STRUCT__entry(
 __field(u32, dev)
 __field(u32, ring)
 __field(u32, seqno)
+__field(u32, flags)
 ),
 TP_fast_assign(
 __entry->dev = ring->dev->primary->index;
 __entry->ring = ring->id;
 __entry->seqno = seqno;
+__entry->flags = flags;
 i915_trace_irq_get(ring, seqno);
 ),
-TP_printk("dev=%u, ring=%u, seqno=%u",
-__entry->dev, __entry->ring, __entry->seqno)
+TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+__entry->dev, __entry->ring, __entry->seqno, __entry->flags)
 );
 TRACE_EVENT(i915_gem_ring_flush,


@@ -755,7 +755,8 @@ void intel_setup_bios(struct drm_device *dev)
 struct drm_i915_private *dev_priv = dev->dev_private;
 /* Set the Panel Power On/Off timings if uninitialized. */
-if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+if (!HAS_PCH_SPLIT(dev) &&
+I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
 /* Set T2 to 40ms and T5 to 200ms */
 I915_WRITE(PP_ON_DELAYS, 0x019007d0);


@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
 	if (mode->clock > max_clock)
 		return MODE_CLOCK_HIGH;
+	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+	if (HAS_PCH_LPT(dev) &&
+	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+		return MODE_CLOCK_HIGH;
 	return MODE_OK;
 }
@@ -221,14 +226,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 adpa;
-	adpa = ADPA_HOTPLUG_BITS;
+	if (HAS_PCH_SPLIT(dev))
+		adpa = ADPA_HOTPLUG_BITS;
+	else
+		adpa = 0;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 	/* For CPT allow 3 pipe config, for others just use A or B */
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_LPT(dev))
+		; /* Those bits don't exist here */
+	else if (HAS_PCH_CPT(dev))
 		adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
 	else if (intel_crtc->pipe == 0)
 		adpa |= ADPA_PIPE_A_SELECT;
@@ -401,12 +412,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
 				   struct i2c_adapter *adapter)
 {
 	struct edid *edid;
+	int ret;
 	edid = intel_crt_get_edid(connector, adapter);
 	if (!edid)
 		return 0;
-	return intel_connector_update_modes(connector, edid);
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+	return ret;
 }
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
@@ -644,10 +659,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
 static void intel_crt_reset(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_attached_crt(connector);
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 adpa;
+		adpa = I915_READ(PCH_ADPA);
+		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+		adpa |= ADPA_HOTPLUG_BITS;
+		I915_WRITE(PCH_ADPA, adpa);
+		POSTING_READ(PCH_ADPA);
+		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
 		crt->force_hotplug_required = 1;
+	}
 }
 /*
@@ -729,7 +756,7 @@ void intel_crt_init(struct drm_device *dev)
 	crt->base.type = INTEL_OUTPUT_ANALOG;
 	crt->base.cloneable = true;
-	if (IS_HASWELL(dev) || IS_I830(dev))
+	if (IS_I830(dev))
 		crt->base.crtc_mask = (1 << 0);
 	else
 		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -749,7 +776,10 @@ void intel_crt_init(struct drm_device *dev)
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
-	crt->base.get_hw_state = intel_crt_get_hw_state;
+	if (IS_HASWELL(dev))
+		crt->base.get_hw_state = intel_ddi_get_hw_state;
+	else
+		crt->base.get_hw_state = intel_crt_get_hw_state;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 	drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -766,18 +796,14 @@ void intel_crt_init(struct drm_device *dev)
 	 * Configure the automatic hotplug detection stuff
 	 */
 	crt->force_hotplug_required = 0;
-	if (HAS_PCH_SPLIT(dev)) {
-		u32 adpa;
-		adpa = I915_READ(PCH_ADPA);
-		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-		adpa |= ADPA_HOTPLUG_BITS;
-		I915_WRITE(PCH_ADPA, adpa);
-		POSTING_READ(PCH_ADPA);
-		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
-		crt->force_hotplug_required = 1;
-	}
 	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+	/*
+	 * TODO: find a proper way to discover whether we need to set the
+	 * polarity reversal bit or not, instead of relying on the BIOS.
+	 */
+	if (HAS_PCH_LPT(dev))
+		dev_priv->fdi_rx_polarity_reversed =
+		     !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
 }
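
The LPT mode_valid check above rejects modes that would need more than the two FDI lanes LPT wires up. A sketch of the arithmetic, assuming the helper rounds the mode's total bit rate up over the per-lane link bandwidth (the real ironlake_get_lanes_required lives in intel_display.c and may add headroom on top of this):

static int lanes_required(int target_clock, int link_bw, int bpp)
{
	/* total bit rate over per-lane capacity, rounded up (assumed) */
	return (target_clock * bpp + link_bw * 8 - 1) / (link_bw * 8);
}

/* e.g. a 148500 (10 kHz units) mode at 24 bpp on a 270000 link:
 * 148500 * 24 / (270000 * 8) = 1.65 -> 2 lanes, so it passes. */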

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -94,6 +94,7 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -163,6 +164,11 @@ struct intel_encoder {
 	int crtc_mask;
 };
+struct intel_panel {
+	struct drm_display_mode *fixed_mode;
+	int fitting_mode;
+};
 struct intel_connector {
 	struct drm_connector base;
 	/*
@@ -179,12 +185,19 @@ struct intel_connector {
 	/* Reads out the current hw, returning true if the connector is enabled
 	 * and active (i.e. dpms ON state). */
 	bool (*get_hw_state)(struct intel_connector *);
+	/* Panel info for eDP and LVDS */
+	struct intel_panel panel;
+	/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+	struct edid *edid;
 };
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
 	enum plane plane;
+	enum transcoder cpu_transcoder;
 	u8 lut_r[256], lut_g[256], lut_b[256];
 	/*
 	 * Whether the crtc and the connected output pipeline is active. Implies
@@ -198,6 +211,8 @@ struct intel_crtc {
 	struct intel_unpin_work *unpin_work;
 	int fdi_lanes;
+	atomic_t unpin_work_count;
 	/* Display surface base address adjustement for pageflips. Note that on
 	 * gen4+ this only adjusts up to a tile, offsets within a tile are
 	 * handled in the hw itself (with the TILEOFF register). */
@@ -212,12 +227,14 @@ struct intel_crtc {
 	/* We can share PLLs across outputs if the timings match */
 	struct intel_pch_pll *pch_pll;
+	uint32_t ddi_pll_sel;
 };
 struct intel_plane {
 	struct drm_plane base;
 	enum pipe pipe;
 	struct drm_i915_gem_object *obj;
+	bool can_scale;
 	int max_downscale;
 	u32 lut_r[1024], lut_g[1024], lut_b[1024];
 	void (*update_plane)(struct drm_plane *plane,
@@ -317,10 +334,8 @@ struct dip_infoframe {
 } __attribute__((packed));
 struct intel_hdmi {
-	struct intel_encoder base;
 	u32 sdvox_reg;
 	int ddc_bus;
-	int ddi_port;
 	uint32_t color_range;
 	bool has_hdmi_sink;
 	bool has_audio;
@@ -331,18 +346,15 @@ struct intel_hdmi {
 			       struct drm_display_mode *adjusted_mode);
 };
-#define DP_RECEIVER_CAP_SIZE 0xf
 #define DP_MAX_DOWNSTREAM_PORTS 0x10
 #define DP_LINK_CONFIGURATION_SIZE 9
 struct intel_dp {
-	struct intel_encoder base;
 	uint32_t output_reg;
 	uint32_t DP;
 	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
 	enum hdmi_force_audio force_audio;
-	enum port port;
 	uint32_t color_range;
 	uint8_t link_bw;
 	uint8_t lane_count;
@@ -357,11 +369,16 @@ struct intel_dp {
 	int panel_power_cycle_delay;
 	int backlight_on_delay;
 	int backlight_off_delay;
-	struct drm_display_mode *panel_fixed_mode; /* for eDP */
 	struct delayed_work panel_vdd_work;
 	bool want_panel_vdd;
-	struct edid *edid; /* cached EDID for eDP */
-	int edid_mode_count;
+	struct intel_connector *attached_connector;
+};
+struct intel_digital_port {
+	struct intel_encoder base;
+	enum port port;
+	struct intel_dp dp;
+	struct intel_hdmi hdmi;
 };
 static inline struct drm_crtc *
@@ -380,11 +397,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 struct intel_unpin_work {
 	struct work_struct work;
-	struct drm_device *dev;
+	struct drm_crtc *crtc;
 	struct drm_i915_gem_object *old_fb_obj;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
-	int pending;
+	atomic_t pending;
+#define INTEL_FLIP_INACTIVE	0
+#define INTEL_FLIP_PENDING	1
+#define INTEL_FLIP_COMPLETE	2
 	bool enable_stall_check;
 };
@@ -395,6 +415,8 @@ struct intel_fbc_work {
 	int interval;
 };
+int intel_pch_rawclk(struct drm_device *dev);
 int intel_connector_update_modes(struct drm_connector *connector,
 				 struct edid *edid);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -405,7 +427,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev,
 			    int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+				      struct intel_connector *intel_connector);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
 			    bool is_sdvob);
@@ -418,10 +445,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int output_reg,
 			  enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+				    struct intel_connector *intel_connector);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern int intel_edp_target_clock(struct intel_encoder *,
 				  struct drm_display_mode *mode);
@@ -431,6 +475,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane);
 /* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+			    struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -439,7 +487,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
 				    struct drm_display_mode *adjusted_mode);
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
 extern void intel_panel_enable_backlight(struct drm_device *dev,
 					 enum pipe pipe);
 extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -473,6 +521,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
 	return to_intel_connector(connector)->encoder;
 }
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->dp;
+}
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_digital_port, base.base);
+}
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+	return container_of(intel_dp, struct intel_digital_port, dp);
+}
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
 extern void intel_connector_attach_encoder(struct intel_connector *connector,
 					   struct intel_encoder *encoder);
 extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -481,8 +554,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 						    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+			     enum pipe pipe);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 struct intel_load_detect_pipe {
 	struct drm_framebuffer *release_fb;
@@ -550,6 +627,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
 					     struct drm_display_mode *mode);
+extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+						      unsigned int bpp,
+						      unsigned int pitch);
 extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -573,12 +654,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
 extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
 extern void ironlake_teardown_rc6(struct drm_device *dev);
-extern void intel_enable_ddi(struct intel_encoder *encoder);
-extern void intel_disable_ddi(struct intel_encoder *encoder);
 extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 				   enum pipe *pipe);
-extern void intel_ddi_mode_set(struct drm_encoder *encoder,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+					      enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 #endif /* __INTEL_DRV_H__ */
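
The header changes above drop the embedded intel_encoder from intel_hdmi and intel_dp and instead embed both inside a new intel_digital_port; the added inline helpers then recover the outer struct from a member pointer with container_of. A self-contained, userspace-only illustration of that pattern (demo types, not i915 code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dp { int output_reg; };
struct digital_port { int port; struct dp dp; };

int main(void)
{
	struct digital_port p = { .port = 1, .dp = { .output_reg = 0x64100 } };
	struct dp *dp = &p.dp;
	/* given only the embedded member, recover the enclosing port */
	struct digital_port *back = container_of(dp, struct digital_port, dp);
	printf("port=%d\n", back->port);	/* prints port=1 */
	return 0;
}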

View File

@@ -36,10 +36,15 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+	return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
 static void
 assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
-	struct drm_device *dev = intel_hdmi->base.base.dev;
+	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t enabled_bits;
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
-	return container_of(encoder, struct intel_hdmi, base.base);
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->hdmi;
 }
 static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_hdmi, base);
+	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
 }
 void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+	avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
 	intel_set_infoframe(encoder, &avi_if);
 }
@@ -754,16 +762,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode)
 {
 	return true;
 }
 static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
 {
-	struct drm_device *dev = intel_hdmi->base.base.dev;
+	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t bit;
@@ -786,6 +794,9 @@ static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_digital_port *intel_dig_port =
+		hdmi_to_dig_port(intel_hdmi);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct edid *edid;
 	enum drm_connector_status status = connector_status_disconnected;
@@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
 			intel_hdmi->has_audio =
 				(intel_hdmi->force_audio == HDMI_AUDIO_ON);
+		intel_encoder->type = INTEL_OUTPUT_HDMI;
 	}
 	return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
 			uint64_t val)
 {
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_digital_port *intel_dig_port =
+		hdmi_to_dig_port(intel_hdmi);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	int ret;
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
@@ -898,8 +912,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
 	return -EINVAL;
 done:
-	if (intel_hdmi->base.base.crtc) {
-		struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+	if (intel_dig_port->base.base.crtc) {
+		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
 		intel_set_mode(crtc, &crtc->mode,
 			       crtc->x, crtc->y, crtc->fb);
 	}
@@ -914,12 +928,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 	kfree(connector);
 }
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
-	.mode_fixup = intel_hdmi_mode_fixup,
-	.mode_set = intel_ddi_mode_set,
-	.disable = intel_encoder_noop,
-};
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
 	.mode_fixup = intel_hdmi_mode_fixup,
 	.mode_set = intel_hdmi_mode_set,
@@ -951,43 +959,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
 	intel_attach_broadcast_rgb_property(connector);
 }
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+			       struct intel_connector *intel_connector)
 {
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_connector *connector;
-	struct intel_encoder *intel_encoder;
-	struct intel_connector *intel_connector;
-	struct intel_hdmi *intel_hdmi;
+	enum port port = intel_dig_port->port;
-	intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
-	if (!intel_hdmi)
-		return;
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-	if (!intel_connector) {
-		kfree(intel_hdmi);
-		return;
-	}
-	intel_encoder = &intel_hdmi->base;
-	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
-			 DRM_MODE_ENCODER_TMDS);
-	connector = &intel_connector->base;
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
 	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
-	intel_encoder->type = INTEL_OUTPUT_HDMI;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-	intel_encoder->cloneable = false;
-	intel_hdmi->ddi_port = port;
 	switch (port) {
 	case PORT_B:
 		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1007,8 +996,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 		BUG();
 	}
-	intel_hdmi->sdvox_reg = sdvox_reg;
 	if (!HAS_PCH_SPLIT(dev)) {
 		intel_hdmi->write_infoframe = g4x_write_infoframe;
 		intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1026,21 +1013,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 		intel_hdmi->set_infoframes = cpt_set_infoframes;
 	}
-	if (IS_HASWELL(dev)) {
-		intel_encoder->enable = intel_enable_ddi;
-		intel_encoder->disable = intel_disable_ddi;
-		intel_encoder->get_hw_state = intel_ddi_get_hw_state;
-		drm_encoder_helper_add(&intel_encoder->base,
-				       &intel_hdmi_helper_funcs_hsw);
-	} else {
-		intel_encoder->enable = intel_enable_hdmi;
-		intel_encoder->disable = intel_disable_hdmi;
-		intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
-		drm_encoder_helper_add(&intel_encoder->base,
-				       &intel_hdmi_helper_funcs);
-	}
-	intel_connector->get_hw_state = intel_connector_get_hw_state;
+	if (IS_HASWELL(dev))
+		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+	else
+		intel_connector->get_hw_state = intel_connector_get_hw_state;
 	intel_hdmi_add_properties(intel_hdmi, connector);
@@ -1056,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
 	}
 }
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dig_port);
+		return;
+	}
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+	intel_encoder->enable = intel_enable_hdmi;
+	intel_encoder->disable = intel_disable_hdmi;
+	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+	intel_encoder->type = INTEL_OUTPUT_HDMI;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->cloneable = false;
+	intel_dig_port->port = port;
+	intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+	intel_dig_port->dp.output_reg = 0;
+	intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
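
The new intel_hdmi_init() above follows the usual two-object error unwind: allocate the digital port, then the connector, and free the port again if the second allocation fails. A standalone, compilable model of that shape (demo types and names, not i915 code):

#include <stdlib.h>

struct port { int dummy; };
struct conn { int dummy; };

static int init_pair(struct port **pp, struct conn **pc)
{
	*pp = calloc(1, sizeof(**pp));
	if (!*pp)
		return -1;
	*pc = calloc(1, sizeof(**pc));
	if (!*pc) {
		free(*pp);	/* unwind the first allocation */
		*pp = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct port *p;
	struct conn *c;
	if (init_pair(&p, &c) == 0) {
		free(c);
		free(p);
	}
	return 0;
}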

View File

@@ -432,7 +432,7 @@ timeout:
 	I915_WRITE(GMBUS0 + reg_offset, 0);
 	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
-	bus->force_bit = true;
+	bus->force_bit = 1;
 	ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
 out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 	/* gmbus seems to be broken on i830 */
 	if (IS_I830(dev))
-		bus->force_bit = true;
+		bus->force_bit = 1;
 	intel_gpio_setup(bus, port);
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 {
 	struct intel_gmbus *bus = to_intel_gmbus(adapter);
-	bus->force_bit = force_bit;
+	bus->force_bit += force_bit ? 1 : -1;
+	DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+		      force_bit ? "en" : "dis", adapter->name,
+		      bus->force_bit);
 }
 void intel_teardown_gmbus(struct drm_device *dev)
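
The type change above turns force_bit from a flag into a reference count: each intel_gmbus_force_bit(adapter, true) increments it and each (..., false) decrements it, so nested users compose instead of clobbering each other. A standalone model of the semantics:

#include <assert.h>

static int force_bit;

static void gmbus_force_bit(int enable)
{
	force_bit += enable ? 1 : -1;	/* counter, not a boolean */
}

int main(void)
{
	gmbus_force_bit(1);	/* first user enables bit-banging */
	gmbus_force_bit(1);	/* second user stacks on top */
	gmbus_force_bit(0);	/* first release: still forced */
	assert(force_bit == 1);
	gmbus_force_bit(0);	/* last release: back to GMBUS */
	assert(force_bit == 0);
	return 0;
}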

View File

@@ -40,28 +40,30 @@
 #include <linux/acpi.h>
 /* Private structure for the integrated LVDS support */
-struct intel_lvds {
+struct intel_lvds_connector {
+	struct intel_connector base;
+	struct notifier_block lid_notifier;
+};
+struct intel_lvds_encoder {
 	struct intel_encoder base;
-	struct edid *edid;
-	int fitting_mode;
 	u32 pfit_control;
 	u32 pfit_pgm_ratios;
 	bool pfit_dirty;
-	struct drm_display_mode *fixed_mode;
+	struct intel_lvds_connector *attached_connector;
 };
-static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
 {
-	return container_of(encoder, struct intel_lvds, base.base);
+	return container_of(encoder, struct intel_lvds_encoder, base.base);
 }
-static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_lvds, base);
+	return container_of(connector, struct intel_lvds_connector, base.base);
 }
 static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 static void intel_enable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 	I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-	if (intel_lvds->pfit_dirty) {
+	if (lvds_encoder->pfit_dirty) {
 		/*
 		 * Enable automatic panel scaling so that non-native modes
 		 * fill the screen. The panel fitter should only be
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 		 * register description and PRM.
 		 */
 		DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-			      intel_lvds->pfit_control,
-			      intel_lvds->pfit_pgm_ratios);
-		I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
-		I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
-		intel_lvds->pfit_dirty = false;
+			      lvds_encoder->pfit_control,
+			      lvds_encoder->pfit_pgm_ratios);
+		I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+		I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+		lvds_encoder->pfit_dirty = false;
 	}
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 static void intel_disable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, lvds_reg, stat_reg;
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power off\n");
-	if (intel_lvds->pfit_control) {
+	if (lvds_encoder->pfit_control) {
 		I915_WRITE(PFIT_CONTROL, 0);
-		intel_lvds->pfit_dirty = true;
+		lvds_encoder->pfit_dirty = true;
 	}
 	I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 static int intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
-	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
-	struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	if (mode->hdisplay > fixed_mode->hdisplay)
 		return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-	struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+	struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
 	int pipe;
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		return false;
 	}
-	if (intel_encoder_check_is_cloned(&intel_lvds->base))
+	if (intel_encoder_check_is_cloned(&lvds_encoder->base))
 		return false;
 	/*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	 * with the panel scaling set up to source from the H/VDisplay
 	 * of the original mode.
 	 */
-	intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+	intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+			       adjusted_mode);
 	if (HAS_PCH_SPLIT(dev)) {
-		intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+		intel_pch_panel_fitting(dev,
+					intel_connector->panel.fitting_mode,
 					mode, adjusted_mode);
 		return true;
 	}
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
-	switch (intel_lvds->fitting_mode) {
+	switch (intel_connector->panel.fitting_mode) {
 	case DRM_MODE_SCALE_CENTER:
 		/*
 		 * For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
 	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
 		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-	if (pfit_control != intel_lvds->pfit_control ||
-	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
-		intel_lvds->pfit_control = pfit_control;
-		intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
-		intel_lvds->pfit_dirty = true;
+	if (pfit_control != lvds_encoder->pfit_control ||
+	    pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+		lvds_encoder->pfit_control = pfit_control;
+		lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+		lvds_encoder->pfit_dirty = true;
 	}
 	dev_priv->lvds_border_bits = border;
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
  */
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
-	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+	struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
-	if (intel_lvds->edid)
-		return drm_add_edid_modes(connector, intel_lvds->edid);
+	/* use cached edid if we have one */
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+		return drm_add_edid_modes(connector, lvds_connector->base.edid);
-	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+	mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
 	if (mode == NULL)
 		return 0;
@@ -496,10 +503,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
 static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 			    void *unused)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(nb, struct drm_i915_private, lid_notifier);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_connector *connector = dev_priv->int_lvds_connector;
+	struct intel_lvds_connector *lvds_connector =
+		container_of(nb, struct intel_lvds_connector, lid_notifier);
+	struct drm_connector *connector = &lvds_connector->base.base;
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
 		return NOTIFY_OK;
@@ -508,9 +516,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	 * check and update the status of LVDS connector after receiving
 	 * the LID nofication event.
 	 */
-	if (connector)
-		connector->status = connector->funcs->detect(connector,
-							     false);
+	connector->status = connector->funcs->detect(connector, false);
 	/* Don't force modeset on machines where it causes a GPU lockup */
 	if (dmi_check_system(intel_no_modeset_on_lid))
@@ -526,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	dev_priv->modeset_on_lid = 0;
 	mutex_lock(&dev->mode_config.mutex);
-	intel_modeset_check_state(dev);
+	intel_modeset_setup_hw_state(dev, true);
 	mutex_unlock(&dev->mode_config.mutex);
 	return NOTIFY_OK;
@@ -541,13 +547,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
  */
 static void intel_lvds_destroy(struct drm_connector *connector)
 {
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds_connector *lvds_connector =
+		to_lvds_connector(connector);
-	intel_panel_destroy_backlight(dev);
+	if (lvds_connector->lid_notifier.notifier_call)
+		acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+		kfree(lvds_connector->base.edid);
+	intel_panel_destroy_backlight(connector->dev);
+	intel_panel_fini(&lvds_connector->base.panel);
-	if (dev_priv->lid_notifier.notifier_call)
-		acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
@@ -557,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 				   struct drm_property *property,
 				   uint64_t value)
 {
-	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;
 	if (property == dev->mode_config.scaling_mode_property) {
-		struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+		struct drm_crtc *crtc;
 		if (value == DRM_MODE_SCALE_NONE) {
 			DRM_DEBUG_KMS("no scaling not supported\n");
 			return -EINVAL;
 		}
-		if (intel_lvds->fitting_mode == value) {
+		if (intel_connector->panel.fitting_mode == value) {
 			/* the LVDS scaling property is not changed */
 			return 0;
 		}
-		intel_lvds->fitting_mode = value;
+		intel_connector->panel.fitting_mode = value;
+		crtc = intel_attached_encoder(connector)->base.crtc;
 		if (crtc && crtc->enabled) {
 			/*
 			 * If the CRTC is enabled, the display will be changed
@@ -912,12 +925,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
 bool intel_lvds_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_lvds *intel_lvds;
+	struct intel_lvds_encoder *lvds_encoder;
 	struct intel_encoder *intel_encoder;
+	struct intel_lvds_connector *lvds_connector;
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
+	struct drm_display_mode *fixed_mode = NULL;
+	struct edid *edid;
 	struct drm_crtc *crtc;
 	u32 lvds;
 	int pipe;
@@ -945,23 +961,25 @@ bool intel_lvds_init(struct drm_device *dev)
 		}
 	}
-	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
-	if (!intel_lvds) {
+	lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+	if (!lvds_encoder)
+		return false;
+	lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+	if (!lvds_connector) {
+		kfree(lvds_encoder);
 		return false;
 	}
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-	if (!intel_connector) {
-		kfree(intel_lvds);
-		return false;
-	}
+	lvds_encoder->attached_connector = lvds_connector;
 	if (!HAS_PCH_SPLIT(dev)) {
-		intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+		lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
 	}
-	intel_encoder = &intel_lvds->base;
+	intel_encoder = &lvds_encoder->base;
 	encoder = &intel_encoder->base;
+	intel_connector = &lvds_connector->base;
 	connector = &intel_connector->base;
 	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
@@ -993,14 +1011,10 @@ bool intel_lvds_init(struct drm_device *dev)
 	/* create the scaling mode property */
 	drm_mode_create_scaling_mode_property(dev);
-	/*
-	 * the initial panel fitting mode will be FULL_SCREEN.
-	 */
-	drm_connector_attach_property(&intel_connector->base,
+	drm_object_attach_property(&connector->base,
 				      dev->mode_config.scaling_mode_property,
 				      DRM_MODE_SCALE_ASPECT);
-	intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+	intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
 	/*
 	 * LVDS discovery:
 	 * 1) check for EDID on DDC
@@ -1015,20 +1029,21 @@ bool intel_lvds_init(struct drm_device *dev)
 	 * Attempt to get the fixed panel mode from DDC. Assume that the
 	 * preferred mode is the right one.
 	 */
-	intel_lvds->edid = drm_get_edid(connector,
-					intel_gmbus_get_adapter(dev_priv,
-								pin));
-	if (intel_lvds->edid) {
-		if (drm_add_edid_modes(connector,
-				       intel_lvds->edid)) {
+	edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+	if (edid) {
+		if (drm_add_edid_modes(connector, edid)) {
 			drm_mode_connector_update_edid_property(connector,
-								intel_lvds->edid);
+								edid);
 		} else {
-			kfree(intel_lvds->edid);
-			intel_lvds->edid = NULL;
+			kfree(edid);
+			edid = ERR_PTR(-EINVAL);
 		}
+	} else {
+		edid = ERR_PTR(-ENOENT);
 	}
-	if (!intel_lvds->edid) {
+	lvds_connector->base.edid = edid;
+	if (IS_ERR_OR_NULL(edid)) {
 		/* Didn't get an EDID, so
 		 * Set wide sync ranges so we get all modes
 		 * handed to valid_mode for checking
@@ -1041,22 +1056,26 @@ bool intel_lvds_init(struct drm_device *dev)
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-			intel_lvds->fixed_mode =
-				drm_mode_duplicate(dev, scan);
-			intel_find_lvds_downclock(dev,
-						  intel_lvds->fixed_mode,
-						  connector);
-			goto out;
+			DRM_DEBUG_KMS("using preferred mode from EDID: ");
+			drm_mode_debug_printmodeline(scan);
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			if (fixed_mode) {
+				intel_find_lvds_downclock(dev, fixed_mode,
+							  connector);
+				goto out;
+			}
 		}
 	}
 	/* Failed to get EDID, what about VBT? */
 	if (dev_priv->lfp_lvds_vbt_mode) {
-		intel_lvds->fixed_mode =
-			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-		if (intel_lvds->fixed_mode) {
-			intel_lvds->fixed_mode->type |=
-				DRM_MODE_TYPE_PREFERRED;
+		DRM_DEBUG_KMS("using mode from VBT: ");
+		drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+		fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+		if (fixed_mode) {
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 			goto out;
 		}
 	}
@@ -1076,16 +1095,17 @@ bool intel_lvds_init(struct drm_device *dev)
 	crtc = intel_get_crtc_for_pipe(dev, pipe);
 	if (crtc && (lvds & LVDS_PORT_EN)) {
-		intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
-		if (intel_lvds->fixed_mode) {
-			intel_lvds->fixed_mode->type |=
-				DRM_MODE_TYPE_PREFERRED;
+		fixed_mode = intel_crtc_mode_get(dev, crtc);
+		if (fixed_mode) {
+			DRM_DEBUG_KMS("using current (BIOS) mode: ");
+			drm_mode_debug_printmodeline(fixed_mode);
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 			goto out;
 		}
 	}
 	/* If we still don't have a mode after all that, give up. */
-	if (!intel_lvds->fixed_mode)
+	if (!fixed_mode)
 		goto failed;
 out:
@@ -1100,16 +1120,15 @@ out:
 		I915_WRITE(PP_CONTROL,
 			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
 	}
-	dev_priv->lid_notifier.notifier_call = intel_lid_notify;
-	if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
 		DRM_DEBUG_KMS("lid notifier registration failed\n");
-		dev_priv->lid_notifier.notifier_call = NULL;
+		lvds_connector->lid_notifier.notifier_call = NULL;
 	}
-	/* keep the LVDS connector */
-	dev_priv->int_lvds_connector = connector;
 	drm_sysfs_connector_add(connector);
-	intel_panel_setup_backlight(dev);
+	intel_panel_init(&intel_connector->panel, fixed_mode);
+	intel_panel_setup_backlight(connector);
 	return true;
@@ -1117,7 +1136,9 @@ failed:
 	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
 	drm_connector_cleanup(connector);
 	drm_encoder_cleanup(encoder);
-	kfree(intel_lvds);
-	kfree(intel_connector);
+	if (fixed_mode)
+		drm_mode_destroy(dev, fixed_mode);
+	kfree(lvds_encoder);
+	kfree(lvds_connector);
 	return false;
 }
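
The rewritten LVDS init caches the EDID as a tri-state pointer: a valid pointer, ERR_PTR(-EINVAL) for corrupt data, or ERR_PTR(-ENOENT) when none was found, with IS_ERR_OR_NULL() guarding every use and kfree(). A minimal userspace model of that encoding, mirroring the idea of include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)      ((void *)(long)(err))
#define PTR_ERR(ptr)      ((long)(ptr))
#define IS_ERR_OR_NULL(p) (!(p) || (unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *edid = ERR_PTR(-2);	/* -ENOENT: no EDID found */
	if (IS_ERR_OR_NULL(edid))
		printf("no usable EDID (err=%ld)\n", PTR_ERR(edid));
	return 0;
}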

View File

@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
 	drm_mode_connector_update_edid_property(connector, edid);
 	ret = drm_add_edid_modes(connector, edid);
 	drm_edid_to_eld(connector, edid);
-	kfree(edid);
 	return ret;
 }
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
 			struct i2c_adapter *adapter)
 {
 	struct edid *edid;
+	int ret;
 	edid = drm_get_edid(connector, adapter);
 	if (!edid)
 		return 0;
-	return intel_connector_update_modes(connector, edid);
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+	return ret;
 }
 static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
 		dev_priv->force_audio_property = prop;
 	}
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
 static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -121,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 		dev_priv->broadcast_rgb_property = prop;
 	}
-	drm_connector_attach_property(connector, prop, 0);
+	drm_object_attach_property(&connector->base, prop, 0);
 }
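
The net effect of the first two hunks above is an ownership change: intel_connector_update_modes() no longer frees the EDID it was handed, so every caller now follows the shape below (condensed from the intel_ddc_get_modes change itself):

struct edid *edid = drm_get_edid(connector, adapter);
if (!edid)
	return 0;
ret = intel_connector_update_modes(connector, edid);
kfree(edid);	/* the caller owns the EDID now */
return ret;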

Some files were not shown because too many files have changed in this diff