Convert struct intel_ringbuffer::irq_queue to drm waitqueues.
(riastradh)
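The hunk below touches i915_wait_irq(): on Linux, DRM_WAIT_ON sleeps on
ring->irq_queue as a wait_event-style Linux waitqueue, while the NetBSD
build now sleeps on the same field converted to a drm waitqueue via
DRM_TIMED_WAIT_UNTIL(ret, &ring->irq_queue, &drm_global_mutex,
3 * DRM_HZ, condition), which takes an explicit interlock because NetBSD
condition variables must be paired with a mutex.  As a rough sketch of
the pattern such a macro follows (the macro name, local variable, and
non-timeout error mapping here are illustrative assumptions, not the
actual drm2 macro body), built on NetBSD's cv_timedwait_sig(9):

#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/mutex.h>

/*
 * Illustrative sketch only: the general shape of a condvar-based
 * DRM_TIMED_WAIT_UNTIL(ret, q, interlock, ticks, condition).
 */
#define EXAMPLE_TIMED_WAIT_UNTIL(ret, q, interlock, ticks, condition)	\
do {									\
	int _error = 0;							\
									\
	mutex_enter(interlock);						\
	(ret) = 0;							\
	while (!(condition)) {						\
		/* Sleep until a waker broadcasts on (q); the		\
		 * interlock is dropped while asleep and retaken	\
		 * before the condition is re-evaluated.  */		\
		_error = cv_timedwait_sig((q), (interlock), (ticks));	\
		if (_error) {						\
			/* EWOULDBLOCK means the timeout expired;	\
			 * map it to -EBUSY, which is what the caller	\
			 * in i915_wait_irq tests for.  Treating every	\
			 * other error as -EINTR is an assumption.  */	\
			(ret) = (_error == EWOULDBLOCK) ?		\
			    -EBUSY : -EINTR;				\
			break;						\
		}							\
	}								\
	mutex_exit(interlock);						\
} while (0)

On the wakeup side, whoever signals completion (here the ring interrupt
handler) broadcasts on the same condvar while holding the interlock;
that is the structural difference from Linux waitqueues, where
wake_up_all() needs no caller-held lock.  The timeout mapping to -EBUSY
matches the check later in i915_wait_irq().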
diff -r1.1.1.1.2.7 -r1.1.1.1.2.8 src/sys/external/bsd/drm2/dist/drm/i915/i915_dma.c
--- src/sys/external/bsd/drm2/dist/drm/i915/Attic/i915_dma.c	2013/07/24 03:05:41	1.1.1.1.2.7
+++ src/sys/external/bsd/drm2/dist/drm/i915/Attic/i915_dma.c	2013/07/24 03:06:00	1.1.1.1.2.8
@@ -1,1805 +1,1811 @@
 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
  */
 /*
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * without limitation the rights to use, copy, modify, merge, publish,
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
  * The above copyright notice and this permission notice (including the
  * next paragraph) shall be included in all copies or substantial portions
  * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #ifndef __NetBSD__		/* XXX fb */
 #include <drm/drm_fb_helper.h>
 #endif
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include <linux/pci.h>
 #include <linux/vgaarb.h>
 #include <linux/acpi.h>
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <acpi/video.h>
 #include <asm/pat.h>
 
 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
 #define BEGIN_LP_RING(n) \
 	intel_ring_begin(LP_RING(dev_priv), (n))
 
 #define OUT_RING(x) \
 	intel_ring_emit(LP_RING(dev_priv), x)
 
 #define ADVANCE_LP_RING() \
 	intel_ring_advance(LP_RING(dev_priv))
 
 /**
  * Lock test for when it's just for synchronization of ring access.
  *
  * In that case, we don't need to do it when GEM is initialized as nobody else
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
 	if (LP_RING(dev->dev_private)->obj == NULL)			\
 		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
 
 static inline u32
 intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
 {
 	if (I915_NEED_GFX_HWS(dev_priv->dev))
 #ifdef __NetBSD__
 		return DRM_READ32(&dev_priv->dri1.gfx_hws_cpu_map, reg);
 #else
 		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
 #endif
 	else
 		return intel_read_status_page(LP_RING(dev_priv), reg);
 }
 
 #define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_BREADCRUMB_INDEX 0x21
 
 void i915_update_dri1_breadcrumb(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
 			master_priv->sarea_priv->last_dispatch =
 				READ_BREADCRUMB(dev_priv);
 	}
 }
 
 static void i915_write_hws_pga(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 addr;
 
 	addr = dev_priv->status_page_dmah->busaddr;
 	if (INTEL_INFO(dev)->gen >= 4)
 		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
 	I915_WRITE(HWS_PGA, addr);
 }
 
 /**
  * Frees the hardware status page, whether it's a physical address or a virtual
  * address set up by the X Server.
  */
 static void i915_free_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	if (dev_priv->status_page_dmah) {
 		drm_pci_free(dev, dev_priv->status_page_dmah);
 		dev_priv->status_page_dmah = NULL;
 	}
 
 	if (ring->status_page.gfx_addr) {
 		ring->status_page.gfx_addr = 0;
 #ifdef __NetBSD__
 		drm_iounmap(dev, &dev_priv->dri1.gfx_hws_cpu_map);
 #else
 		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
 #endif
 	}
 
 	/* Need to rewrite hardware status page */
 	I915_WRITE(HWS_PGA, 0x1ffff000);
 }
 
 void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	/*
 	 * We should never lose context on the ring with modesetting
 	 * as we don't expose it to userspace
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
 	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
 	if (ring->space < 0)
 		ring->space += ring->size;
 
 	if (!dev->primary->master)
 		return;
 
 	master_priv = dev->primary->master->driver_priv;
 	if (ring->head == ring->tail && master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
 static int i915_dma_cleanup(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
 
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
 	 */
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
 		i915_free_hws(dev);
 
 	return 0;
 }
 
 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret;
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
 		master_priv->sarea_priv = (drm_i915_sarea_t *)
 			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
 	} else {
 		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
 	}
 
 	if (init->ring_size != 0) {
 		if (LP_RING(dev_priv)->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
 		ret = intel_render_ring_init_dri(dev,
 						 init->ring_start,
 						 init->ring_size);
 		if (ret) {
 			i915_dma_cleanup(dev);
 			return ret;
 		}
 	}
 
 	dev_priv->dri1.cpp = init->cpp;
 	dev_priv->dri1.back_offset = init->back_offset;
 	dev_priv->dri1.front_offset = init->front_offset;
 	dev_priv->dri1.current_page = 0;
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->pf_current_page = 0;
 
 	/* Allow hardware batchbuffers unless told otherwise.
 	 */
 	dev_priv->dri1.allow_batchbuffer = 1;
 
 	return 0;
 }
 
 static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
 #ifdef __NetBSD__
 	if (!ring->virtual_start_mapped) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 #else
 	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 #endif
 
 	/* Program Hardware Status Page */
 	if (!ring->status_page.page_addr) {
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
 			 ring->status_page.page_addr);
 	if (ring->status_page.gfx_addr != 0)
 		intel_ring_setup_status_page(ring);
 	else
 		i915_write_hws_pga(dev);
 
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
 	return 0;
 }
 
 static int i915_dma_init(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
 	drm_i915_init_t *init = data;
 	int retcode = 0;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
 	switch (init->func) {
 	case I915_INIT_DMA:
 		retcode = i915_initialize(dev, init);
 		break;
 	case I915_CLEANUP_DMA:
 		retcode = i915_dma_cleanup(dev);
 		break;
 	case I915_RESUME_DMA:
 		retcode = i915_dma_resume(dev);
 		break;
 	default:
 		retcode = -EINVAL;
 		break;
 	}
 
 	return retcode;
 }
 
 /* Implement basically the same security restrictions as hardware does
  * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
  *
  * Most of the calculations below involve calculating the size of a
  * particular instruction.  It's important to get the size right as
  * that tells us where the next instruction to check is.  Any illegal
  * instruction detected will be given a size of zero, which is a
  * signal to abort the rest of the buffer.
  */
 static int validate_cmd(int cmd)
 {
 	switch (((cmd >> 29) & 0x7)) {
 	case 0x0:
 		switch ((cmd >> 23) & 0x3f) {
 		case 0x0:
 			return 1;	/* MI_NOOP */
 		case 0x4:
 			return 1;	/* MI_FLUSH */
 		default:
 			return 0;	/* disallow everything else */
 		}
 		break;
 	case 0x1:
 		return 0;	/* reserved */
 	case 0x2:
 		return (cmd & 0xff) + 2;	/* 2d commands */
 	case 0x3:
 		if (((cmd >> 24) & 0x1f) <= 0x18)
 			return 1;
 
 		switch ((cmd >> 24) & 0x1f) {
 		case 0x1c:
 			return 1;
 		case 0x1d:
 			switch ((cmd >> 16) & 0xff) {
 			case 0x3:
 				return (cmd & 0x1f) + 2;
 			case 0x4:
 				return (cmd & 0xf) + 2;
 			default:
 				return (cmd & 0xffff) + 2;
 			}
 		case 0x1e:
 			if (cmd & (1 << 23))
 				return (cmd & 0xffff) + 1;
 			else
 				return 1;
 		case 0x1f:
 			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
 				return (cmd & 0x1ffff) + 2;
 			else if (cmd & (1 << 17))	/* indirect random */
 				if ((cmd & 0xffff) == 0)
 					return 0;	/* unknown length, too hard */
 				else
 					return (((cmd & 0xffff) + 1) / 2) + 1;
 			else
 				return 2;	/* indirect sequential */
 		default:
 			return 0;
 		}
 	default:
 		return 0;
 	}
 
 	return 0;
 }
 
 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i, ret;
 
 	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
 		return -EINVAL;
 
 	for (i = 0; i < dwords;) {
 		int sz = validate_cmd(buffer[i]);
 		if (sz == 0 || i + sz > dwords)
 			return -EINVAL;
 		i += sz;
 	}
 
 	ret = BEGIN_LP_RING((dwords+1)&~1);
 	if (ret)
 		return ret;
 
 	for (i = 0; i < dwords; i++)
 		OUT_RING(buffer[i]);
 	if (dwords & 1)
 		OUT_RING(0);
 
 	ADVANCE_LP_RING();
 
 	return 0;
 }
 
 int
 i915_emit_box(struct drm_device *dev,
 	      struct drm_clip_rect *box,
 	      int DR1, int DR4)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
 	    box->y2 <= 0 || box->x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
 			  box->x1, box->y1, box->x2, box->y2);
 		return -EINVAL;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		ret = BEGIN_LP_RING(4);
 		if (ret)
 			return ret;
 
 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
 		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
 		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
 		OUT_RING(DR4);
 	} else {
 		ret = BEGIN_LP_RING(6);
 		if (ret)
 			return ret;
 
 		OUT_RING(GFX_OP_DRAWRECT_INFO);
 		OUT_RING(DR1);
 		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
 		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
 		OUT_RING(DR4);
 		OUT_RING(0);
 	}
 	ADVANCE_LP_RING();
 
 	return 0;
 }
 
 /* XXX: Emitting the counter should really be moved to part of the IRQ
  * emit. For now, do it in both places:
  */
 
 static void i915_emit_breadcrumb(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
 	dev_priv->dri1.counter++;
 	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
 		dev_priv->dri1.counter = 0;
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
 	if (BEGIN_LP_RING(4) == 0) {
 		OUT_RING(MI_STORE_DWORD_INDEX);
 		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 		OUT_RING(dev_priv->dri1.counter);
 		OUT_RING(0);
 		ADVANCE_LP_RING();
 	}
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 				   drm_i915_cmdbuffer_t *cmd,
 				   struct drm_clip_rect *cliprects,
 				   void *cmdbuf)
 {
 	int nbox = cmd->num_cliprects;
 	int i = 0, count, ret;
 
 	if (cmd->sz & 0x3) {
 		DRM_ERROR("alignment");
 		return -EINVAL;
 	}
 
 	i915_kernel_lost_context(dev);
 
 	count = nbox ? nbox : 1;
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
 			ret = i915_emit_box(dev, &cliprects[i],
 					    cmd->DR1, cmd->DR4);
 			if (ret)
 				return ret;
 		}
 
 		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
 		if (ret)
 			return ret;
 	}
 
 	i915_emit_breadcrumb(dev);
 	return 0;
 }
 
 static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				     drm_i915_batchbuffer_t * batch,
 				     struct drm_clip_rect *cliprects)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
 	int i, count, ret;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
 		return -EINVAL;
 	}
 
 	i915_kernel_lost_context(dev);
 
 	count = nbox ? nbox : 1;
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
 			ret = i915_emit_box(dev, &cliprects[i],
 					    batch->DR1, batch->DR4);
 			if (ret)
 				return ret;
 		}
 
 		if (!IS_I830(dev) && !IS_845G(dev)) {
 			ret = BEGIN_LP_RING(2);
 			if (ret)
 				return ret;
 
 			if (INTEL_INFO(dev)->gen >= 4) {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
 				OUT_RING(batch->start);
 			} else {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
 				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			}
 		} else {
 			ret = BEGIN_LP_RING(4);
 			if (ret)
 				return ret;
 
 			OUT_RING(MI_BATCH_BUFFER);
 			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			OUT_RING(batch->start + batch->used - 4);
 			OUT_RING(0);
 		}
 		ADVANCE_LP_RING();
 	}
 
 
 	if (IS_G4X(dev) || IS_GEN5(dev)) {
 		if (BEGIN_LP_RING(2) == 0) {
 			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
 			OUT_RING(MI_NOOP);
 			ADVANCE_LP_RING();
 		}
 	}
 
 	i915_emit_breadcrumb(dev);
 	return 0;
 }
 
 static int i915_dispatch_flip(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
 	int ret;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
 
 	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
 			 __func__,
 			 dev_priv->dri1.current_page,
 			 master_priv->sarea_priv->pf_current_page);
 
 	i915_kernel_lost_context(dev);
 
 	ret = BEGIN_LP_RING(10);
 	if (ret)
 		return ret;
 
 	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
 	OUT_RING(0);
 
 	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
 	OUT_RING(0);
 	if (dev_priv->dri1.current_page == 0) {
 		OUT_RING(dev_priv->dri1.back_offset);
 		dev_priv->dri1.current_page = 1;
 	} else {
 		OUT_RING(dev_priv->dri1.front_offset);
 		dev_priv->dri1.current_page = 0;
 	}
 	OUT_RING(0);
 
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
 	OUT_RING(0);
 
 	ADVANCE_LP_RING();
 
 	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
 
 	if (BEGIN_LP_RING(4) == 0) {
 		OUT_RING(MI_STORE_DWORD_INDEX);
 		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 		OUT_RING(dev_priv->dri1.counter);
 		OUT_RING(0);
 		ADVANCE_LP_RING();
 	}
 
 	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
 	return 0;
 }
 
 static int i915_quiescent(struct drm_device *dev)
 {
 	i915_kernel_lost_context(dev);
 	return intel_ring_idle(LP_RING(dev->dev_private));
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
 	int ret;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_quiescent(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
 }
 
 static int i915_batchbuffer(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 	    master_priv->sarea_priv;
 	drm_i915_batchbuffer_t *batch = data;
 	int ret;
 	struct drm_clip_rect *cliprects = NULL;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
 	if (!dev_priv->dri1.allow_batchbuffer) {
 		DRM_ERROR("Batchbuffer ioctl disabled\n");
 		return -EINVAL;
 	}
 
 	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
 			 batch->start, batch->used, batch->num_cliprects);
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (batch->num_cliprects < 0)
 		return -EINVAL;
 
 	if (batch->num_cliprects) {
 		cliprects = kcalloc(batch->num_cliprects,
 				    sizeof(struct drm_clip_rect),
 				    GFP_KERNEL);
 		if (cliprects == NULL)
 			return -ENOMEM;
 
 		ret = copy_from_user(cliprects, batch->cliprects,
 				     batch->num_cliprects *
 				     sizeof(struct drm_clip_rect));
 		if (ret != 0) {
 			ret = -EFAULT;
 			goto fail_free;
 		}
 	}
 
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
 fail_free:
 	kfree(cliprects);
 
 	return ret;
 }
 
 static int i915_cmdbuffer(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
 	    master_priv->sarea_priv;
 	drm_i915_cmdbuffer_t *cmdbuf = data;
 	struct drm_clip_rect *cliprects = NULL;
 	void *batch_data;
 	int ret;
 
 	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
 			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (cmdbuf->num_cliprects < 0)
 		return -EINVAL;
 
 	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
 	if (batch_data == NULL)
 		return -ENOMEM;
 
 	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
 	if (ret != 0) {
 		ret = -EFAULT;
 		goto fail_batch_free;
 	}
 
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
 		if (cliprects == NULL) {
 			ret = -ENOMEM;
 			goto fail_batch_free;
 		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
 				     sizeof(struct drm_clip_rect));
 		if (ret != 0) {
 			ret = -EFAULT;
 			goto fail_clip_free;
 		}
 	}
 
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
 		goto fail_clip_free;
 	}
 
 	if (sarea_priv)
 		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
 fail_clip_free:
 	kfree(cliprects);
 fail_batch_free:
 	kfree(batch_data);
 
 	return ret;
 }
 
 static int i915_emit_irq(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
 	i915_kernel_lost_context(dev);
 
 	DRM_DEBUG_DRIVER("\n");
 
 	dev_priv->dri1.counter++;
 	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
 		dev_priv->dri1.counter = 1;
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
 	if (BEGIN_LP_RING(4) == 0) {
 		OUT_RING(MI_STORE_DWORD_INDEX);
 		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 		OUT_RING(dev_priv->dri1.counter);
 		OUT_RING(MI_USER_INTERRUPT);
 		ADVANCE_LP_RING();
 	}
 
 	return dev_priv->dri1.counter;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
 	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 			 READ_BREADCRUMB(dev_priv));
 
 	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
 		if (master_priv->sarea_priv)
 			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 		return 0;
 	}
 
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
 	if (ring->irq_get(ring)) {
+#ifdef __NetBSD__
+		DRM_TIMED_WAIT_UNTIL(ret, &ring->irq_queue, &drm_global_mutex,
+		    3 * DRM_HZ,
+		    READ_BREADCRUMB(dev_priv) >= irq_nr);
+#else
 		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 			    READ_BREADCRUMB(dev_priv) >= irq_nr);
+#endif
807 | ring->irq_put(ring); | 813 | ring->irq_put(ring); | |
808 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) | 814 | } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000)) | |
809 | ret = -EBUSY; | 815 | ret = -EBUSY; | |
810 | 816 | |||
811 | if (ret == -EBUSY) { | 817 | if (ret == -EBUSY) { | |
812 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | 818 | DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", | |
813 | READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); | 819 | READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter); | |
814 | } | 820 | } | |
815 | 821 | |||
816 | return ret; | 822 | return ret; | |
817 | } | 823 | } | |
818 | 824 | |||
819 | /* Needs the lock as it touches the ring. | 825 | /* Needs the lock as it touches the ring. | |
820 | */ | 826 | */ | |
821 | static int i915_irq_emit(struct drm_device *dev, void *data, | 827 | static int i915_irq_emit(struct drm_device *dev, void *data, | |
822 | struct drm_file *file_priv) | 828 | struct drm_file *file_priv) | |
823 | { | 829 | { | |
824 | drm_i915_private_t *dev_priv = dev->dev_private; | 830 | drm_i915_private_t *dev_priv = dev->dev_private; | |
825 | drm_i915_irq_emit_t *emit = data; | 831 | drm_i915_irq_emit_t *emit = data; | |
826 | int result; | 832 | int result; | |
827 | 833 | |||
828 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 834 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
829 | return -ENODEV; | 835 | return -ENODEV; | |
830 | 836 | |||
831 | #ifdef __NetBSD__ | 837 | #ifdef __NetBSD__ | |
832 | if (!dev_priv || !LP_RING(dev_priv)->virtual_start_mapped) { | 838 | if (!dev_priv || !LP_RING(dev_priv)->virtual_start_mapped) { | |
833 | DRM_ERROR("called with no initialization\n"); | 839 | DRM_ERROR("called with no initialization\n"); | |
834 | return -EINVAL; | 840 | return -EINVAL; | |
835 | } | 841 | } | |
836 | #else | 842 | #else | |
837 | if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { | 843 | if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { | |
838 | DRM_ERROR("called with no initialization\n"); | 844 | DRM_ERROR("called with no initialization\n"); | |
839 | return -EINVAL; | 845 | return -EINVAL; | |
840 | } | 846 | } | |
841 | #endif | 847 | #endif | |
842 | 848 | |||
843 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | 849 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | |
844 | 850 | |||
845 | mutex_lock(&dev->struct_mutex); | 851 | mutex_lock(&dev->struct_mutex); | |
846 | result = i915_emit_irq(dev); | 852 | result = i915_emit_irq(dev); | |
847 | mutex_unlock(&dev->struct_mutex); | 853 | mutex_unlock(&dev->struct_mutex); | |
848 | 854 | |||
849 | if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { | 855 | if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { | |
850 | DRM_ERROR("copy_to_user\n"); | 856 | DRM_ERROR("copy_to_user\n"); | |
851 | return -EFAULT; | 857 | return -EFAULT; | |
852 | } | 858 | } | |
853 | 859 | |||
854 | return 0; | 860 | return 0; | |
855 | } | 861 | } | |
856 | 862 | |||
857 | /* Doesn't need the hardware lock. | 863 | /* Doesn't need the hardware lock. | |
858 | */ | 864 | */ | |
859 | static int i915_irq_wait(struct drm_device *dev, void *data, | 865 | static int i915_irq_wait(struct drm_device *dev, void *data, | |
860 | struct drm_file *file_priv) | 866 | struct drm_file *file_priv) | |
861 | { | 867 | { | |
862 | drm_i915_private_t *dev_priv = dev->dev_private; | 868 | drm_i915_private_t *dev_priv = dev->dev_private; | |
863 | drm_i915_irq_wait_t *irqwait = data; | 869 | drm_i915_irq_wait_t *irqwait = data; | |
864 | 870 | |||
865 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 871 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
866 | return -ENODEV; | 872 | return -ENODEV; | |
867 | 873 | |||
868 | if (!dev_priv) { | 874 | if (!dev_priv) { | |
869 | DRM_ERROR("called with no initialization\n"); | 875 | DRM_ERROR("called with no initialization\n"); | |
870 | return -EINVAL; | 876 | return -EINVAL; | |
871 | } | 877 | } | |
872 | 878 | |||
873 | return i915_wait_irq(dev, irqwait->irq_seq); | 879 | return i915_wait_irq(dev, irqwait->irq_seq); | |
874 | } | 880 | } | |
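The two ioctls above pair up: IRQ_EMIT writes a new breadcrumb sequence
number back to the caller, and IRQ_WAIT blocks (on the ring's irq
waitqueue, the structure this change converts) until the hardware passes
it. A minimal userland sketch, assuming an already-open DRM fd; the
ioctl numbers and payload types are the real ones from <drm/i915_drm.h>,
the rest is illustrative:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int emit_and_wait(int drm_fd)
{
	drm_i915_irq_emit_t emit;
	drm_i915_irq_wait_t wait;
	int seq = 0;

	emit.irq_seq = &seq;	/* i915_irq_emit copies the new seqno here */
	if (ioctl(drm_fd, DRM_IOCTL_I915_IRQ_EMIT, &emit) == -1)
		return -1;

	wait.irq_seq = seq;	/* i915_irq_wait -> i915_wait_irq(dev, seq) */
	return ioctl(drm_fd, DRM_IOCTL_I915_IRQ_WAIT, &wait);
}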
875 | 881 | |||
876 | static int i915_vblank_pipe_get(struct drm_device *dev, void *data, | 882 | static int i915_vblank_pipe_get(struct drm_device *dev, void *data, | |
877 | struct drm_file *file_priv) | 883 | struct drm_file *file_priv) | |
878 | { | 884 | { | |
879 | drm_i915_private_t *dev_priv = dev->dev_private; | 885 | drm_i915_private_t *dev_priv = dev->dev_private; | |
880 | drm_i915_vblank_pipe_t *pipe = data; | 886 | drm_i915_vblank_pipe_t *pipe = data; | |
881 | 887 | |||
882 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 888 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
883 | return -ENODEV; | 889 | return -ENODEV; | |
884 | 890 | |||
885 | if (!dev_priv) { | 891 | if (!dev_priv) { | |
886 | DRM_ERROR("called with no initialization\n"); | 892 | DRM_ERROR("called with no initialization\n"); | |
887 | return -EINVAL; | 893 | return -EINVAL; | |
888 | } | 894 | } | |
889 | 895 | |||
890 | pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | 896 | pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; | |
891 | 897 | |||
892 | return 0; | 898 | return 0; | |
893 | } | 899 | } | |
894 | 900 | |||
895 | /** | 901 | /** | |
896 | * Schedule buffer swap at given vertical blank. | 902 | * Schedule buffer swap at given vertical blank. | |
897 | */ | 903 | */ | |
898 | static int i915_vblank_swap(struct drm_device *dev, void *data, | 904 | static int i915_vblank_swap(struct drm_device *dev, void *data, | |
899 | struct drm_file *file_priv) | 905 | struct drm_file *file_priv) | |
900 | { | 906 | { | |
901 | /* The delayed swap mechanism was fundamentally racy, and has been | 907 | /* The delayed swap mechanism was fundamentally racy, and has been | |
902 | * removed. The model was that the client requested a delayed flip/swap | 908 | * removed. The model was that the client requested a delayed flip/swap | |
903 | * from the kernel, then waited for vblank before continuing to perform | 909 | * from the kernel, then waited for vblank before continuing to perform | |
904 | * rendering. The problem was that the kernel might wake the client | 910 | * rendering. The problem was that the kernel might wake the client | |
905 | * up before it dispatched the vblank swap (since the lock has to be | 911 | * up before it dispatched the vblank swap (since the lock has to be | |
906 | * held while touching the ringbuffer), in which case the client would | 912 | * held while touching the ringbuffer), in which case the client would | |
907 | * clear and start the next frame before the swap occurred, and | 913 | * clear and start the next frame before the swap occurred, and | |
908 | * flicker would occur in addition to likely missing the vblank. | 914 | * flicker would occur in addition to likely missing the vblank. | |
909 | * | 915 | * | |
910 | * In the absence of this ioctl, userland falls back to a correct path | 916 | * In the absence of this ioctl, userland falls back to a correct path | |
911 | * of waiting for a vblank, then dispatching the swap on its own. | 917 | * of waiting for a vblank, then dispatching the swap on its own. | |
912 | * Context switching to userland and back is plenty fast enough for | 918 | * Context switching to userland and back is plenty fast enough for | |
913 | * meeting the requirements of vblank swapping. | 919 | * meeting the requirements of vblank swapping. | |
914 | */ | 920 | */ | |
915 | return -EINVAL; | 921 | return -EINVAL; | |
916 | } | 922 | } | |
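The correct userland path that this comment describes is straightforward
to sketch: block on the generic vblank ioctl, then dispatch the swap
directly. A hedged sketch, assuming an open DRM fd; do_swap() stands in
for the client's own blit/flip and is not part of this driver:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

extern int do_swap(int drm_fd);		/* hypothetical client-side swap */

static int swap_after_vblank(int drm_fd)
{
	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;	/* wake on the next vblank */
	if (ioctl(drm_fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == -1)
		return -1;
	return do_swap(drm_fd);		/* dispatch the swap ourselves */
}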
917 | 923 | |||
918 | static int i915_flip_bufs(struct drm_device *dev, void *data, | 924 | static int i915_flip_bufs(struct drm_device *dev, void *data, | |
919 | struct drm_file *file_priv) | 925 | struct drm_file *file_priv) | |
920 | { | 926 | { | |
921 | int ret; | 927 | int ret; | |
922 | 928 | |||
923 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 929 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
924 | return -ENODEV; | 930 | return -ENODEV; | |
925 | 931 | |||
926 | DRM_DEBUG_DRIVER("%s\n", __func__); | 932 | DRM_DEBUG_DRIVER("%s\n", __func__); | |
927 | 933 | |||
928 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | 934 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | |
929 | 935 | |||
930 | mutex_lock(&dev->struct_mutex); | 936 | mutex_lock(&dev->struct_mutex); | |
931 | ret = i915_dispatch_flip(dev); | 937 | ret = i915_dispatch_flip(dev); | |
932 | mutex_unlock(&dev->struct_mutex); | 938 | mutex_unlock(&dev->struct_mutex); | |
933 | 939 | |||
934 | return ret; | 940 | return ret; | |
935 | } | 941 | } | |
936 | 942 | |||
937 | static int i915_getparam(struct drm_device *dev, void *data, | 943 | static int i915_getparam(struct drm_device *dev, void *data, | |
938 | struct drm_file *file_priv) | 944 | struct drm_file *file_priv) | |
939 | { | 945 | { | |
940 | drm_i915_private_t *dev_priv = dev->dev_private; | 946 | drm_i915_private_t *dev_priv = dev->dev_private; | |
941 | drm_i915_getparam_t *param = data; | 947 | drm_i915_getparam_t *param = data; | |
942 | int value; | 948 | int value; | |
943 | 949 | |||
944 | if (!dev_priv) { | 950 | if (!dev_priv) { | |
945 | DRM_ERROR("called with no initialization\n"); | 951 | DRM_ERROR("called with no initialization\n"); | |
946 | return -EINVAL; | 952 | return -EINVAL; | |
947 | } | 953 | } | |
948 | 954 | |||
949 | switch (param->param) { | 955 | switch (param->param) { | |
950 | case I915_PARAM_IRQ_ACTIVE: | 956 | case I915_PARAM_IRQ_ACTIVE: | |
951 | value = dev->pdev->irq ? 1 : 0; | 957 | value = dev->pdev->irq ? 1 : 0; | |
952 | break; | 958 | break; | |
953 | case I915_PARAM_ALLOW_BATCHBUFFER: | 959 | case I915_PARAM_ALLOW_BATCHBUFFER: | |
954 | value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; | 960 | value = dev_priv->dri1.allow_batchbuffer ? 1 : 0; | |
955 | break; | 961 | break; | |
956 | case I915_PARAM_LAST_DISPATCH: | 962 | case I915_PARAM_LAST_DISPATCH: | |
957 | value = READ_BREADCRUMB(dev_priv); | 963 | value = READ_BREADCRUMB(dev_priv); | |
958 | break; | 964 | break; | |
959 | case I915_PARAM_CHIPSET_ID: | 965 | case I915_PARAM_CHIPSET_ID: | |
960 | value = dev->pci_device; | 966 | value = dev->pci_device; | |
961 | break; | 967 | break; | |
962 | case I915_PARAM_HAS_GEM: | 968 | case I915_PARAM_HAS_GEM: | |
963 | value = 1; | 969 | value = 1; | |
964 | break; | 970 | break; | |
965 | case I915_PARAM_NUM_FENCES_AVAIL: | 971 | case I915_PARAM_NUM_FENCES_AVAIL: | |
966 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; | 972 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; | |
967 | break; | 973 | break; | |
968 | case I915_PARAM_HAS_OVERLAY: | 974 | case I915_PARAM_HAS_OVERLAY: | |
969 | value = dev_priv->overlay ? 1 : 0; | 975 | value = dev_priv->overlay ? 1 : 0; | |
970 | break; | 976 | break; | |
971 | case I915_PARAM_HAS_PAGEFLIPPING: | 977 | case I915_PARAM_HAS_PAGEFLIPPING: | |
972 | value = 1; | 978 | value = 1; | |
973 | break; | 979 | break; | |
974 | case I915_PARAM_HAS_EXECBUF2: | 980 | case I915_PARAM_HAS_EXECBUF2: | |
975 | /* depends on GEM */ | 981 | /* depends on GEM */ | |
976 | value = 1; | 982 | value = 1; | |
977 | break; | 983 | break; | |
978 | case I915_PARAM_HAS_BSD: | 984 | case I915_PARAM_HAS_BSD: | |
979 | value = intel_ring_initialized(&dev_priv->ring[VCS]); | 985 | value = intel_ring_initialized(&dev_priv->ring[VCS]); | |
980 | break; | 986 | break; | |
981 | case I915_PARAM_HAS_BLT: | 987 | case I915_PARAM_HAS_BLT: | |
982 | value = intel_ring_initialized(&dev_priv->ring[BCS]); | 988 | value = intel_ring_initialized(&dev_priv->ring[BCS]); | |
983 | break; | 989 | break; | |
984 | case I915_PARAM_HAS_RELAXED_FENCING: | 990 | case I915_PARAM_HAS_RELAXED_FENCING: | |
985 | value = 1; | 991 | value = 1; | |
986 | break; | 992 | break; | |
987 | case I915_PARAM_HAS_COHERENT_RINGS: | 993 | case I915_PARAM_HAS_COHERENT_RINGS: | |
988 | value = 1; | 994 | value = 1; | |
989 | break; | 995 | break; | |
990 | case I915_PARAM_HAS_EXEC_CONSTANTS: | 996 | case I915_PARAM_HAS_EXEC_CONSTANTS: | |
991 | value = INTEL_INFO(dev)->gen >= 4; | 997 | value = INTEL_INFO(dev)->gen >= 4; | |
992 | break; | 998 | break; | |
993 | case I915_PARAM_HAS_RELAXED_DELTA: | 999 | case I915_PARAM_HAS_RELAXED_DELTA: | |
994 | value = 1; | 1000 | value = 1; | |
995 | break; | 1001 | break; | |
996 | case I915_PARAM_HAS_GEN7_SOL_RESET: | 1002 | case I915_PARAM_HAS_GEN7_SOL_RESET: | |
997 | value = 1; | 1003 | value = 1; | |
998 | break; | 1004 | break; | |
999 | case I915_PARAM_HAS_LLC: | 1005 | case I915_PARAM_HAS_LLC: | |
1000 | value = HAS_LLC(dev); | 1006 | value = HAS_LLC(dev); | |
1001 | break; | 1007 | break; | |
1002 | case I915_PARAM_HAS_ALIASING_PPGTT: | 1008 | case I915_PARAM_HAS_ALIASING_PPGTT: | |
1003 | value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; | 1009 | value = dev_priv->mm.aliasing_ppgtt ? 1 : 0; | |
1004 | break; | 1010 | break; | |
1005 | case I915_PARAM_HAS_WAIT_TIMEOUT: | 1011 | case I915_PARAM_HAS_WAIT_TIMEOUT: | |
1006 | value = 1; | 1012 | value = 1; | |
1007 | break; | 1013 | break; | |
1008 | case I915_PARAM_HAS_SEMAPHORES: | 1014 | case I915_PARAM_HAS_SEMAPHORES: | |
1009 | value = i915_semaphore_is_enabled(dev); | 1015 | value = i915_semaphore_is_enabled(dev); | |
1010 | break; | 1016 | break; | |
1011 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: | 1017 | case I915_PARAM_HAS_PRIME_VMAP_FLUSH: | |
1012 | value = 1; | 1018 | value = 1; | |
1013 | break; | 1019 | break; | |
1014 | case I915_PARAM_HAS_SECURE_BATCHES: | 1020 | case I915_PARAM_HAS_SECURE_BATCHES: | |
1015 | value = capable(CAP_SYS_ADMIN); | 1021 | value = capable(CAP_SYS_ADMIN); | |
1016 | break; | 1022 | break; | |
1017 | case I915_PARAM_HAS_PINNED_BATCHES: | 1023 | case I915_PARAM_HAS_PINNED_BATCHES: | |
1018 | value = 1; | 1024 | value = 1; | |
1019 | break; | 1025 | break; | |
1020 | default: | 1026 | default: | |
1021 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | 1027 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", | |
1022 | param->param); | 1028 | param->param); | |
1023 | return -EINVAL; | 1029 | return -EINVAL; | |
1024 | } | 1030 | } | |
1025 | 1031 | |||
1026 | if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { | 1032 | if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { | |
1027 | DRM_ERROR("DRM_COPY_TO_USER failed\n"); | 1033 | DRM_ERROR("DRM_COPY_TO_USER failed\n"); | |
1028 | return -EFAULT; | 1034 | return -EFAULT; | |
1029 | } | 1035 | } | |
1030 | 1036 | |||
1031 | return 0; | 1037 | return 0; | |
1032 | } | 1038 | } | |
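Userland reads each of these parameters through a single pointer-backed
ioctl. A minimal sketch for one case handled above, assuming an open DRM
fd (the param and ioctl names are real, the wrapper is illustrative):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_chipset_id(int drm_fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &value;	/* kernel DRM_COPY_TO_USERs the id here */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
		return -1;
	return value;		/* PCI device id, cf. dev->pci_device */
}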
1033 | 1039 | |||
1034 | static int i915_setparam(struct drm_device *dev, void *data, | 1040 | static int i915_setparam(struct drm_device *dev, void *data, | |
1035 | struct drm_file *file_priv) | 1041 | struct drm_file *file_priv) | |
1036 | { | 1042 | { | |
1037 | drm_i915_private_t *dev_priv = dev->dev_private; | 1043 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1038 | drm_i915_setparam_t *param = data; | 1044 | drm_i915_setparam_t *param = data; | |
1039 | 1045 | |||
1040 | if (!dev_priv) { | 1046 | if (!dev_priv) { | |
1041 | DRM_ERROR("called with no initialization\n"); | 1047 | DRM_ERROR("called with no initialization\n"); | |
1042 | return -EINVAL; | 1048 | return -EINVAL; | |
1043 | } | 1049 | } | |
1044 | 1050 | |||
1045 | switch (param->param) { | 1051 | switch (param->param) { | |
1046 | case I915_SETPARAM_USE_MI_BATCHBUFFER_START: | 1052 | case I915_SETPARAM_USE_MI_BATCHBUFFER_START: | |
1047 | break; | 1053 | break; | |
1048 | case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: | 1054 | case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: | |
1049 | break; | 1055 | break; | |
1050 | case I915_SETPARAM_ALLOW_BATCHBUFFER: | 1056 | case I915_SETPARAM_ALLOW_BATCHBUFFER: | |
1051 | dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; | 1057 | dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; | |
1052 | break; | 1058 | break; | |
1053 | case I915_SETPARAM_NUM_USED_FENCES: | 1059 | case I915_SETPARAM_NUM_USED_FENCES: | |
1054 | if (param->value > dev_priv->num_fence_regs || | 1060 | if (param->value > dev_priv->num_fence_regs || | |
1055 | param->value < 0) | 1061 | param->value < 0) | |
1056 | return -EINVAL; | 1062 | return -EINVAL; | |
1057 | /* Userspace can use first N regs */ | 1063 | /* Userspace can use first N regs */ | |
1058 | dev_priv->fence_reg_start = param->value; | 1064 | dev_priv->fence_reg_start = param->value; | |
1059 | break; | 1065 | break; | |
1060 | default: | 1066 | default: | |
1061 | DRM_DEBUG_DRIVER("unknown parameter %d\n", | 1067 | DRM_DEBUG_DRIVER("unknown parameter %d\n", | |
1062 | param->param); | 1068 | param->param); | |
1063 | return -EINVAL; | 1069 | return -EINVAL; | |
1064 | } | 1070 | } | |
1065 | 1071 | |||
1066 | return 0; | 1072 | return 0; | |
1067 | } | 1073 | } | |
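The setparam direction is symmetric except that the value travels by
value, not through a user pointer. A hedged sketch that claims the first
two fence registers via the I915_SETPARAM_NUM_USED_FENCES case above:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int reserve_two_fences(int drm_fd)
{
	drm_i915_setparam_t sp;

	sp.param = I915_SETPARAM_NUM_USED_FENCES;
	sp.value = 2;		/* userspace may use fence regs 0 and 1 */
	return ioctl(drm_fd, DRM_IOCTL_I915_SETPARAM, &sp);
}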
1068 | 1074 | |||
1069 | static int i915_set_status_page(struct drm_device *dev, void *data, | 1075 | static int i915_set_status_page(struct drm_device *dev, void *data, | |
1070 | struct drm_file *file_priv) | 1076 | struct drm_file *file_priv) | |
1071 | { | 1077 | { | |
1072 | drm_i915_private_t *dev_priv = dev->dev_private; | 1078 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1073 | drm_i915_hws_addr_t *hws = data; | 1079 | drm_i915_hws_addr_t *hws = data; | |
1074 | struct intel_ring_buffer *ring; | 1080 | struct intel_ring_buffer *ring; | |
1075 | #ifdef __NetBSD__ | 1081 | #ifdef __NetBSD__ | |
1076 | struct drm_local_map *const gfx_hws_cpu_map = | 1082 | struct drm_local_map *const gfx_hws_cpu_map = | |
1077 | &dev_priv->dri1.gfx_hws_cpu_map; | 1083 | &dev_priv->dri1.gfx_hws_cpu_map; | |
1078 | #endif | 1084 | #endif | |
1079 | 1085 | |||
1080 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 1086 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
1081 | return -ENODEV; | 1087 | return -ENODEV; | |
1082 | 1088 | |||
1083 | if (!I915_NEED_GFX_HWS(dev)) | 1089 | if (!I915_NEED_GFX_HWS(dev)) | |
1084 | return -EINVAL; | 1090 | return -EINVAL; | |
1085 | 1091 | |||
1086 | if (!dev_priv) { | 1092 | if (!dev_priv) { | |
1087 | DRM_ERROR("called with no initialization\n"); | 1093 | DRM_ERROR("called with no initialization\n"); | |
1088 | return -EINVAL; | 1094 | return -EINVAL; | |
1089 | } | 1095 | } | |
1090 | 1096 | |||
1091 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1097 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1092 | WARN(1, "tried to set status page when mode setting active\n"); | 1098 | WARN(1, "tried to set status page when mode setting active\n"); | |
1093 | return 0; | 1099 | return 0; | |
1094 | } | 1100 | } | |
1095 | 1101 | |||
1096 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); | 1102 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); | |
1097 | 1103 | |||
1098 | ring = LP_RING(dev_priv); | 1104 | ring = LP_RING(dev_priv); | |
1099 | ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); | 1105 | ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); | |
1100 | 1106 | |||
1101 | #ifdef __NetBSD__ | 1107 | #ifdef __NetBSD__ | |
1102 | gfx_hws_cpu_map->offset = (dev_priv->mm.gtt_base_addr + | 1108 | gfx_hws_cpu_map->offset = (dev_priv->mm.gtt_base_addr + | |
1103 | hws->addr); | 1109 | hws->addr); | |
1104 | gfx_hws_cpu_map->size = 4096; | 1110 | gfx_hws_cpu_map->size = 4096; | |
1105 | gfx_hws_cpu_map->flags = 0; | 1111 | gfx_hws_cpu_map->flags = 0; | |
1106 | gfx_hws_cpu_map->flags |= _DRM_RESTRICTED; | 1112 | gfx_hws_cpu_map->flags |= _DRM_RESTRICTED; | |
1107 | gfx_hws_cpu_map->flags |= _DRM_KERNEL; | 1113 | gfx_hws_cpu_map->flags |= _DRM_KERNEL; | |
1108 | gfx_hws_cpu_map->flags |= _DRM_WRITE_COMBINING; | 1114 | gfx_hws_cpu_map->flags |= _DRM_WRITE_COMBINING; | |
1109 | gfx_hws_cpu_map->flags |= _DRM_DRIVER; | 1115 | gfx_hws_cpu_map->flags |= _DRM_DRIVER; | |
1110 | if (drm_ioremap(dev, gfx_hws_cpu_map) == NULL) { | 1116 | if (drm_ioremap(dev, gfx_hws_cpu_map) == NULL) { | |
1111 | i915_dma_cleanup(dev); | 1117 | i915_dma_cleanup(dev); | |
1112 | ring->status_page.gfx_addr = 0; | 1118 | ring->status_page.gfx_addr = 0; | |
1113 | DRM_ERROR("cannot ioremap virtual address for" | 1119 | DRM_ERROR("cannot ioremap virtual address for" | |
1114 | " G33 hw status page\n"); | 1120 | " G33 hw status page\n"); | |
1115 | return -ENOMEM; | 1121 | return -ENOMEM; | |
1116 | } | 1122 | } | |
1117 | 1123 | |||
1118 | /* XXX drm_local_map abstraction violation. Pooh. */ | 1124 | /* XXX drm_local_map abstraction violation. Pooh. */ | |
1119 | bus_space_set_region_1(gfx_hws_cpu_map->lm_data.bus_space.bst, | 1125 | bus_space_set_region_1(gfx_hws_cpu_map->lm_data.bus_space.bst, | |
1120 | gfx_hws_cpu_map->lm_data.bus_space.bsh, 0, 0, PAGE_SIZE); | 1126 | gfx_hws_cpu_map->lm_data.bus_space.bsh, 0, 0, PAGE_SIZE); | |
1121 | #else | 1127 | #else | |
1122 | dev_priv->dri1.gfx_hws_cpu_addr = | 1128 | dev_priv->dri1.gfx_hws_cpu_addr = | |
1123 | ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096); | 1129 | ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096); | |
1124 | if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { | 1130 | if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { | |
1125 | i915_dma_cleanup(dev); | 1131 | i915_dma_cleanup(dev); | |
1126 | ring->status_page.gfx_addr = 0; | 1132 | ring->status_page.gfx_addr = 0; | |
1127 | DRM_ERROR("cannot ioremap virtual address for" | 1133 | DRM_ERROR("cannot ioremap virtual address for" | |
1128 | " G33 hw status page\n"); | 1134 | " G33 hw status page\n"); | |
1129 | return -ENOMEM; | 1135 | return -ENOMEM; | |
1130 | } | 1136 | } | |
1131 | 1137 | |||
1132 | memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); | 1138 | memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); | |
1133 | #endif | 1139 | #endif | |
1134 | 1140 | |||
1135 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | 1141 | I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); | |
1136 | 1142 | |||
1137 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", | 1143 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", | |
1138 | ring->status_page.gfx_addr); | 1144 | ring->status_page.gfx_addr); | |
1139 | DRM_DEBUG_DRIVER("load hws at %p\n", | 1145 | DRM_DEBUG_DRIVER("load hws at %p\n", | |
1140 | ring->status_page.page_addr); | 1146 | ring->status_page.page_addr); | |
1141 | return 0; | 1147 | return 0; | |
1142 | } | 1148 | } | |
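Two notes on i915_set_status_page: the WARN branch above is unreachable
in this version, because the DRIVER_MODESET check at the top of the
function already returned -ENODEV; and the gfx_addr mask is simple page
alignment within a 512 MB window. A worked example of the mask:

	(0x1ffff << 12) == 0x1ffff000	/* keeps bits 12..28 */
	0x00345678 & 0x1ffff000 == 0x00345000

so the sub-page bits of hws->addr are dropped before the address is
written to HWS_PGA.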
1143 | 1149 | |||
1144 | static int i915_get_bridge_dev(struct drm_device *dev) | 1150 | static int i915_get_bridge_dev(struct drm_device *dev) | |
1145 | { | 1151 | { | |
1146 | struct drm_i915_private *dev_priv = dev->dev_private; | 1152 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1147 | 1153 | |||
1148 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); | 1154 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); | |
1149 | if (!dev_priv->bridge_dev) { | 1155 | if (!dev_priv->bridge_dev) { | |
1150 | DRM_ERROR("bridge device not found\n"); | 1156 | DRM_ERROR("bridge device not found\n"); | |
1151 | return -1; | 1157 | return -1; | |
1152 | } | 1158 | } | |
1153 | return 0; | 1159 | return 0; | |
1154 | } | 1160 | } | |
1155 | 1161 | |||
1156 | #define MCHBAR_I915 0x44 | 1162 | #define MCHBAR_I915 0x44 | |
1157 | #define MCHBAR_I965 0x48 | 1163 | #define MCHBAR_I965 0x48 | |
1158 | #define MCHBAR_SIZE (4*4096) | 1164 | #define MCHBAR_SIZE (4*4096) | |
1159 | 1165 | |||
1160 | #define DEVEN_REG 0x54 | 1166 | #define DEVEN_REG 0x54 | |
1161 | #define DEVEN_MCHBAR_EN (1 << 28) | 1167 | #define DEVEN_MCHBAR_EN (1 << 28) | |
1162 | 1168 | |||
1163 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | 1169 | /* Allocate space for the MCH regs if needed, return nonzero on error */ | |
1164 | static int | 1170 | static int | |
1165 | intel_alloc_mchbar_resource(struct drm_device *dev) | 1171 | intel_alloc_mchbar_resource(struct drm_device *dev) | |
1166 | { | 1172 | { | |
1167 | drm_i915_private_t *dev_priv = dev->dev_private; | 1173 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1168 | int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | 1174 | int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | |
1169 | u32 temp_lo, temp_hi = 0; | 1175 | u32 temp_lo, temp_hi = 0; | |
1170 | u64 mchbar_addr; | 1176 | u64 mchbar_addr; | |
1171 | int ret; | 1177 | int ret; | |
1172 | 1178 | |||
1173 | if (INTEL_INFO(dev)->gen >= 4) | 1179 | if (INTEL_INFO(dev)->gen >= 4) | |
1174 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); | 1180 | pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); | |
1175 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | 1181 | pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); | |
1176 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | 1182 | mchbar_addr = ((u64)temp_hi << 32) | temp_lo; | |
1177 | 1183 | |||
1178 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | 1184 | /* If ACPI doesn't have it, assume we need to allocate it ourselves */ | |
1179 | #ifdef CONFIG_PNP | 1185 | #ifdef CONFIG_PNP | |
1180 | if (mchbar_addr && | 1186 | if (mchbar_addr && | |
1181 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) | 1187 | pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) | |
1182 | return 0; | 1188 | return 0; | |
1183 | #endif | 1189 | #endif | |
1184 | 1190 | |||
1185 | /* Get some space for it */ | 1191 | /* Get some space for it */ | |
1186 | dev_priv->mch_res.name = "i915 MCHBAR"; | 1192 | dev_priv->mch_res.name = "i915 MCHBAR"; | |
1187 | dev_priv->mch_res.flags = IORESOURCE_MEM; | 1193 | dev_priv->mch_res.flags = IORESOURCE_MEM; | |
1188 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, | 1194 | ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, | |
1189 | &dev_priv->mch_res, | 1195 | &dev_priv->mch_res, | |
1190 | MCHBAR_SIZE, MCHBAR_SIZE, | 1196 | MCHBAR_SIZE, MCHBAR_SIZE, | |
1191 | PCIBIOS_MIN_MEM, | 1197 | PCIBIOS_MIN_MEM, | |
1192 | 0, pcibios_align_resource, | 1198 | 0, pcibios_align_resource, | |
1193 | dev_priv->bridge_dev); | 1199 | dev_priv->bridge_dev); | |
1194 | if (ret) { | 1200 | if (ret) { | |
1195 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); | 1201 | DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); | |
1196 | dev_priv->mch_res.start = 0; | 1202 | dev_priv->mch_res.start = 0; | |
1197 | return ret; | 1203 | return ret; | |
1198 | } | 1204 | } | |
1199 | 1205 | |||
1200 | if (INTEL_INFO(dev)->gen >= 4) | 1206 | if (INTEL_INFO(dev)->gen >= 4) | |
1201 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, | 1207 | pci_write_config_dword(dev_priv->bridge_dev, reg + 4, | |
1202 | upper_32_bits(dev_priv->mch_res.start)); | 1208 | upper_32_bits(dev_priv->mch_res.start)); | |
1203 | 1209 | |||
1204 | pci_write_config_dword(dev_priv->bridge_dev, reg, | 1210 | pci_write_config_dword(dev_priv->bridge_dev, reg, | |
1205 | lower_32_bits(dev_priv->mch_res.start)); | 1211 | lower_32_bits(dev_priv->mch_res.start)); | |
1206 | return 0; | 1212 | return 0; | |
1207 | } | 1213 | } | |
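The 64-bit MCHBAR address is assembled from two 32-bit config-space
reads. A worked example with illustrative register contents:

	temp_hi == 0x00000000, temp_lo == 0xfed10000
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo == 0xfed10000

On gen < 4 only the low dword is read, so temp_hi stays 0 and the MCHBAR
must sit below 4 GB.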
1208 | 1214 | |||
1209 | /* Set up MCHBAR if possible; record whether we must disable it on teardown */ | 1215 | /* Set up MCHBAR if possible; record whether we must disable it on teardown */ | |
1210 | static void | 1216 | static void | |
1211 | intel_setup_mchbar(struct drm_device *dev) | 1217 | intel_setup_mchbar(struct drm_device *dev) | |
1212 | { | 1218 | { | |
1213 | drm_i915_private_t *dev_priv = dev->dev_private; | 1219 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1214 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | 1220 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | |
1215 | u32 temp; | 1221 | u32 temp; | |
1216 | bool enabled; | 1222 | bool enabled; | |
1217 | 1223 | |||
1218 | dev_priv->mchbar_need_disable = false; | 1224 | dev_priv->mchbar_need_disable = false; | |
1219 | 1225 | |||
1220 | if (IS_I915G(dev) || IS_I915GM(dev)) { | 1226 | if (IS_I915G(dev) || IS_I915GM(dev)) { | |
1221 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | 1227 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | |
1222 | enabled = !!(temp & DEVEN_MCHBAR_EN); | 1228 | enabled = !!(temp & DEVEN_MCHBAR_EN); | |
1223 | } else { | 1229 | } else { | |
1224 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | 1230 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
1225 | enabled = temp & 1; | 1231 | enabled = temp & 1; | |
1226 | } | 1232 | } | |
1227 | 1233 | |||
1228 | /* If it's already enabled, don't have to do anything */ | 1234 | /* If it's already enabled, don't have to do anything */ | |
1229 | if (enabled) | 1235 | if (enabled) | |
1230 | return; | 1236 | return; | |
1231 | 1237 | |||
1232 | if (intel_alloc_mchbar_resource(dev)) | 1238 | if (intel_alloc_mchbar_resource(dev)) | |
1233 | return; | 1239 | return; | |
1234 | 1240 | |||
1235 | dev_priv->mchbar_need_disable = true; | 1241 | dev_priv->mchbar_need_disable = true; | |
1236 | 1242 | |||
1237 | /* Space is allocated or reserved, so enable it. */ | 1243 | /* Space is allocated or reserved, so enable it. */ | |
1238 | if (IS_I915G(dev) || IS_I915GM(dev)) { | 1244 | if (IS_I915G(dev) || IS_I915GM(dev)) { | |
1239 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, | 1245 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, | |
1240 | temp | DEVEN_MCHBAR_EN); | 1246 | temp | DEVEN_MCHBAR_EN); | |
1241 | } else { | 1247 | } else { | |
1242 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | 1248 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
1243 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); | 1249 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); | |
1244 | } | 1250 | } | |
1245 | } | 1251 | } | |
1246 | 1252 | |||
1247 | static void | 1253 | static void | |
1248 | intel_teardown_mchbar(struct drm_device *dev) | 1254 | intel_teardown_mchbar(struct drm_device *dev) | |
1249 | { | 1255 | { | |
1250 | drm_i915_private_t *dev_priv = dev->dev_private; | 1256 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1251 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | 1257 | int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; | |
1252 | u32 temp; | 1258 | u32 temp; | |
1253 | 1259 | |||
1254 | if (dev_priv->mchbar_need_disable) { | 1260 | if (dev_priv->mchbar_need_disable) { | |
1255 | if (IS_I915G(dev) || IS_I915GM(dev)) { | 1261 | if (IS_I915G(dev) || IS_I915GM(dev)) { | |
1256 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | 1262 | pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); | |
1257 | temp &= ~DEVEN_MCHBAR_EN; | 1263 | temp &= ~DEVEN_MCHBAR_EN; | |
1258 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); | 1264 | pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); | |
1259 | } else { | 1265 | } else { | |
1260 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | 1266 | pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); | |
1261 | temp &= ~1; | 1267 | temp &= ~1; | |
1262 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); | 1268 | pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); | |
1263 | } | 1269 | } | |
1264 | } | 1270 | } | |
1265 | 1271 | |||
1266 | if (dev_priv->mch_res.start) | 1272 | if (dev_priv->mch_res.start) | |
1267 | release_resource(&dev_priv->mch_res); | 1273 | release_resource(&dev_priv->mch_res); | |
1268 | } | 1274 | } | |
1269 | 1275 | |||
1270 | #ifndef __NetBSD__ /* XXX vga */ | 1276 | #ifndef __NetBSD__ /* XXX vga */ | |
1271 | /* true = enable decode, false = disable decode */ | 1277 | /* true = enable decode, false = disable decode */ | |
1272 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | 1278 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | |
1273 | { | 1279 | { | |
1274 | struct drm_device *dev = cookie; | 1280 | struct drm_device *dev = cookie; | |
1275 | 1281 | |||
1276 | intel_modeset_vga_set_state(dev, state); | 1282 | intel_modeset_vga_set_state(dev, state); | |
1277 | if (state) | 1283 | if (state) | |
1278 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | 1284 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | |
1279 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 1285 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
1280 | else | 1286 | else | |
1281 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 1287 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
1282 | } | 1288 | } | |
1283 | 1289 | |||
1284 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | 1290 | static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) | |
1285 | { | 1291 | { | |
1286 | struct drm_device *dev = pci_get_drvdata(pdev); | 1292 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1287 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | 1293 | pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; | |
1288 | if (state == VGA_SWITCHEROO_ON) { | 1294 | if (state == VGA_SWITCHEROO_ON) { | |
1289 | pr_info("switched on\n"); | 1295 | pr_info("switched on\n"); | |
1290 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | 1296 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | |
1291 | /* i915 resume handler doesn't set to D0 */ | 1297 | /* i915 resume handler doesn't set to D0 */ | |
1292 | pci_set_power_state(dev->pdev, PCI_D0); | 1298 | pci_set_power_state(dev->pdev, PCI_D0); | |
1293 | i915_resume(dev); | 1299 | i915_resume(dev); | |
1294 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | 1300 | dev->switch_power_state = DRM_SWITCH_POWER_ON; | |
1295 | } else { | 1301 | } else { | |
1296 | pr_err("switched off\n"); | 1302 | pr_err("switched off\n"); | |
1297 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | 1303 | dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | |
1298 | i915_suspend(dev, pmm); | 1304 | i915_suspend(dev, pmm); | |
1299 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | 1305 | dev->switch_power_state = DRM_SWITCH_POWER_OFF; | |
1300 | } | 1306 | } | |
1301 | } | 1307 | } | |
1302 | 1308 | |||
1303 | static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | 1309 | static bool i915_switcheroo_can_switch(struct pci_dev *pdev) | |
1304 | { | 1310 | { | |
1305 | struct drm_device *dev = pci_get_drvdata(pdev); | 1311 | struct drm_device *dev = pci_get_drvdata(pdev); | |
1306 | bool can_switch; | 1312 | bool can_switch; | |
1307 | 1313 | |||
1308 | spin_lock(&dev->count_lock); | 1314 | spin_lock(&dev->count_lock); | |
1309 | can_switch = (dev->open_count == 0); | 1315 | can_switch = (dev->open_count == 0); | |
1310 | spin_unlock(&dev->count_lock); | 1316 | spin_unlock(&dev->count_lock); | |
1311 | return can_switch; | 1317 | return can_switch; | |
1312 | } | 1318 | } | |
1313 | 1319 | |||
1314 | static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { | 1320 | static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { | |
1315 | .set_gpu_state = i915_switcheroo_set_state, | 1321 | .set_gpu_state = i915_switcheroo_set_state, | |
1316 | .reprobe = NULL, | 1322 | .reprobe = NULL, | |
1317 | .can_switch = i915_switcheroo_can_switch, | 1323 | .can_switch = i915_switcheroo_can_switch, | |
1318 | }; | 1324 | }; | |
1319 | #endif | 1325 | #endif | |
1320 | 1326 | |||
1321 | static int i915_load_modeset_init(struct drm_device *dev) | 1327 | static int i915_load_modeset_init(struct drm_device *dev) | |
1322 | { | 1328 | { | |
1323 | struct drm_i915_private *dev_priv = dev->dev_private; | 1329 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1324 | int ret; | 1330 | int ret; | |
1325 | 1331 | |||
1326 | ret = intel_parse_bios(dev); | 1332 | ret = intel_parse_bios(dev); | |
1327 | if (ret) | 1333 | if (ret) | |
1328 | DRM_INFO("failed to find VBIOS tables\n"); | 1334 | DRM_INFO("failed to find VBIOS tables\n"); | |
1329 | 1335 | |||
1330 | #ifndef __NetBSD__ /* XXX vga */ | 1336 | #ifndef __NetBSD__ /* XXX vga */ | |
1331 | /* If we have more than one VGA card, we need to arbitrate access | 1337 | /* If we have more than one VGA card, we need to arbitrate access | |
1332 | * to the common VGA resources. | 1338 | * to the common VGA resources. | |
1333 | * | 1339 | * | |
1334 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), | 1340 | * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), | |
1335 | * then we do not take part in VGA arbitration and the | 1341 | * then we do not take part in VGA arbitration and the | |
1336 | * vga_client_register() fails with -ENODEV. | 1342 | * vga_client_register() fails with -ENODEV. | |
1337 | */ | 1343 | */ | |
1338 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | 1344 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | |
1339 | if (ret && ret != -ENODEV) | 1345 | if (ret && ret != -ENODEV) | |
1340 | goto out; | 1346 | goto out; | |
1341 | #endif | 1347 | #endif | |
1342 | 1348 | |||
1343 | intel_register_dsm_handler(); | 1349 | intel_register_dsm_handler(); | |
1344 | 1350 | |||
1345 | #ifndef __NetBSD__ /* XXX vga */ | 1351 | #ifndef __NetBSD__ /* XXX vga */ | |
1346 | ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); | 1352 | ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); | |
1347 | if (ret) | 1353 | if (ret) | |
1348 | goto cleanup_vga_client; | 1354 | goto cleanup_vga_client; | |
1349 | #endif | 1355 | #endif | |
1350 | 1356 | |||
1351 | /* Initialise stolen first so that we may reserve preallocated | 1357 | /* Initialise stolen first so that we may reserve preallocated | |
1352 | * objects for the BIOS to KMS transition. | 1358 | * objects for the BIOS to KMS transition. | |
1353 | */ | 1359 | */ | |
1354 | ret = i915_gem_init_stolen(dev); | 1360 | ret = i915_gem_init_stolen(dev); | |
1355 | if (ret) | 1361 | if (ret) | |
1356 | goto cleanup_vga_switcheroo; | 1362 | goto cleanup_vga_switcheroo; | |
1357 | 1363 | |||
1358 | intel_modeset_init(dev); | 1364 | intel_modeset_init(dev); | |
1359 | 1365 | |||
1360 | ret = i915_gem_init(dev); | 1366 | ret = i915_gem_init(dev); | |
1361 | if (ret) | 1367 | if (ret) | |
1362 | goto cleanup_gem_stolen; | 1368 | goto cleanup_gem_stolen; | |
1363 | 1369 | |||
1364 | intel_modeset_gem_init(dev); | 1370 | intel_modeset_gem_init(dev); | |
1365 | 1371 | |||
1366 | INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); | 1372 | INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); | |
1367 | 1373 | |||
1368 | ret = drm_irq_install(dev); | 1374 | ret = drm_irq_install(dev); | |
1369 | if (ret) | 1375 | if (ret) | |
1370 | goto cleanup_gem; | 1376 | goto cleanup_gem; | |
1371 | 1377 | |||
1372 | /* Always safe in the mode setting case. */ | 1378 | /* Always safe in the mode setting case. */ | |
1373 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | 1379 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | |
1374 | dev->vblank_disable_allowed = 1; | 1380 | dev->vblank_disable_allowed = 1; | |
1375 | 1381 | |||
1376 | #ifndef __NetBSD__ /* XXX fb */ | 1382 | #ifndef __NetBSD__ /* XXX fb */ | |
1377 | ret = intel_fbdev_init(dev); | 1383 | ret = intel_fbdev_init(dev); | |
1378 | if (ret) | 1384 | if (ret) | |
1379 | goto cleanup_irq; | 1385 | goto cleanup_irq; | |
1380 | #endif | 1386 | #endif | |
1381 | 1387 | |||
1382 | drm_kms_helper_poll_init(dev); | 1388 | drm_kms_helper_poll_init(dev); | |
1383 | 1389 | |||
1384 | /* We're off and running w/KMS */ | 1390 | /* We're off and running w/KMS */ | |
1385 | dev_priv->mm.suspended = 0; | 1391 | dev_priv->mm.suspended = 0; | |
1386 | 1392 | |||
1387 | return 0; | 1393 | return 0; | |
1388 | 1394 | |||
1389 | #ifndef __NetBSD__ /* XXX fb */ | 1395 | #ifndef __NetBSD__ /* XXX fb */ | |
1390 | cleanup_irq: | 1396 | cleanup_irq: | |
1391 | drm_irq_uninstall(dev); | 1397 | drm_irq_uninstall(dev); | |
1392 | #endif | 1398 | #endif | |
1393 | cleanup_gem: | 1399 | cleanup_gem: | |
1394 | mutex_lock(&dev->struct_mutex); | 1400 | mutex_lock(&dev->struct_mutex); | |
1395 | i915_gem_cleanup_ringbuffer(dev); | 1401 | i915_gem_cleanup_ringbuffer(dev); | |
1396 | mutex_unlock(&dev->struct_mutex); | 1402 | mutex_unlock(&dev->struct_mutex); | |
1397 | i915_gem_cleanup_aliasing_ppgtt(dev); | 1403 | i915_gem_cleanup_aliasing_ppgtt(dev); | |
1398 | cleanup_gem_stolen: | 1404 | cleanup_gem_stolen: | |
1399 | i915_gem_cleanup_stolen(dev); | 1405 | i915_gem_cleanup_stolen(dev); | |
1400 | cleanup_vga_switcheroo: | 1406 | cleanup_vga_switcheroo: | |
1401 | #ifndef __NetBSD__ /* XXX vga */ | 1407 | #ifndef __NetBSD__ /* XXX vga */ | |
1402 | vga_switcheroo_unregister_client(dev->pdev); | 1408 | vga_switcheroo_unregister_client(dev->pdev); | |
1403 | cleanup_vga_client: | 1409 | cleanup_vga_client: | |
1404 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 1410 | vga_client_register(dev->pdev, NULL, NULL, NULL); | |
1405 | out: | 1411 | out: | |
1406 | #endif | 1412 | #endif | |
1407 | return ret; | 1413 | return ret; | |
1408 | } | 1414 | } | |
1409 | 1415 | |||
1410 | int i915_master_create(struct drm_device *dev, struct drm_master *master) | 1416 | int i915_master_create(struct drm_device *dev, struct drm_master *master) | |
1411 | { | 1417 | { | |
1412 | struct drm_i915_master_private *master_priv; | 1418 | struct drm_i915_master_private *master_priv; | |
1413 | 1419 | |||
1414 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); | 1420 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); | |
1415 | if (!master_priv) | 1421 | if (!master_priv) | |
1416 | return -ENOMEM; | 1422 | return -ENOMEM; | |
1417 | 1423 | |||
1418 | master->driver_priv = master_priv; | 1424 | master->driver_priv = master_priv; | |
1419 | return 0; | 1425 | return 0; | |
1420 | } | 1426 | } | |
1421 | 1427 | |||
1422 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | 1428 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | |
1423 | { | 1429 | { | |
1424 | struct drm_i915_master_private *master_priv = master->driver_priv; | 1430 | struct drm_i915_master_private *master_priv = master->driver_priv; | |
1425 | 1431 | |||
1426 | if (!master_priv) | 1432 | if (!master_priv) | |
1427 | return; | 1433 | return; | |
1428 | 1434 | |||
1429 | kfree(master_priv); | 1435 | kfree(master_priv); | |
1430 | 1436 | |||
1431 | master->driver_priv = NULL; | 1437 | master->driver_priv = NULL; | |
1432 | } | 1438 | } | |
1433 | 1439 | |||
1434 | static void | 1440 | static void | |
1435 | i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, | 1441 | i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, | |
1436 | unsigned long size) | 1442 | unsigned long size) | |
1437 | { | 1443 | { | |
1438 | dev_priv->mm.gtt_mtrr = -1; | 1444 | dev_priv->mm.gtt_mtrr = -1; | |
1439 | 1445 | |||
1440 | #if defined(CONFIG_X86_PAT) | 1446 | #if defined(CONFIG_X86_PAT) | |
1441 | if (cpu_has_pat) | 1447 | if (cpu_has_pat) | |
1442 | return; | 1448 | return; | |
1443 | #endif | 1449 | #endif | |
1444 | 1450 | |||
1445 | /* Set up a WC MTRR for non-PAT systems. This is more common than | 1451 | /* Set up a WC MTRR for non-PAT systems. This is more common than | |
1446 | * one would think: the kernel disables PAT on first-generation | 1452 | * one would think: the kernel disables PAT on first-generation | |
1447 | * Core chips, because a WC PAT entry would be overridden by a UC | 1453 | * Core chips, because a WC PAT entry would be overridden by a UC | |
1448 | * MTRR, and it does so even when no UC MTRR is present. | 1454 | * MTRR, and it does so even when no UC MTRR is present. | |
1449 | */ | 1455 | */ | |
1450 | dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1); | 1456 | dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1); | |
1451 | if (dev_priv->mm.gtt_mtrr < 0) { | 1457 | if (dev_priv->mm.gtt_mtrr < 0) { | |
1452 | DRM_INFO("MTRR allocation failed. Graphics " | 1458 | DRM_INFO("MTRR allocation failed. Graphics " | |
1453 | "performance may suffer.\n"); | 1459 | "performance may suffer.\n"); | |
1454 | } | 1460 | } | |
1455 | } | 1461 | } | |
1456 | 1462 | |||
1457 | #ifndef __NetBSD__ /* XXX fb */ | 1463 | #ifndef __NetBSD__ /* XXX fb */ | |
1458 | static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | 1464 | static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) | |
1459 | { | 1465 | { | |
1460 | struct apertures_struct *ap; | 1466 | struct apertures_struct *ap; | |
1461 | struct pci_dev *pdev = dev_priv->dev->pdev; | 1467 | struct pci_dev *pdev = dev_priv->dev->pdev; | |
1462 | bool primary; | 1468 | bool primary; | |
1463 | 1469 | |||
1464 | ap = alloc_apertures(1); | 1470 | ap = alloc_apertures(1); | |
1465 | if (!ap) | 1471 | if (!ap) | |
1466 | return; | 1472 | return; | |
1467 | 1473 | |||
1468 | ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr; | 1474 | ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr; | |
1469 | ap->ranges[0].size = | 1475 | ap->ranges[0].size = | |
1470 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | 1476 | dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | |
1471 | primary = | 1477 | primary = | |
1472 | pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | 1478 | pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | |
1473 | 1479 | |||
1474 | remove_conflicting_framebuffers(ap, "inteldrmfb", primary); | 1480 | remove_conflicting_framebuffers(ap, "inteldrmfb", primary); | |
1475 | 1481 | |||
1476 | kfree(ap); | 1482 | kfree(ap); | |
1477 | } | 1483 | } | |
1478 | #endif | 1484 | #endif | |
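The aperture size used here (and again in i915_driver_load below) is
just the mappable GTT entry count scaled by the page size. A worked
example with illustrative numbers:

	gtt_mappable_entries == 65536, PAGE_SHIFT == 12
	65536 << 12 == 0x10000000 == 256 MB of mappable aperture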
1479 | 1485 | |||
1480 | static void i915_dump_device_info(struct drm_i915_private *dev_priv) | 1486 | static void i915_dump_device_info(struct drm_i915_private *dev_priv) | |
1481 | { | 1487 | { | |
1482 | const struct intel_device_info *info = dev_priv->info; | 1488 | const struct intel_device_info *info = dev_priv->info; | |
1483 | 1489 | |||
1484 | #define DEV_INFO_FLAG(name) info->name ? #name "," : "" | 1490 | #define DEV_INFO_FLAG(name) info->name ? #name "," : "" | |
1485 | #define DEV_INFO_SEP , | 1491 | #define DEV_INFO_SEP , | |
1486 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" | 1492 | DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" | |
1487 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", | 1493 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", | |
1488 | info->gen, | 1494 | info->gen, | |
1489 | dev_priv->dev->pdev->device, | 1495 | dev_priv->dev->pdev->device, | |
1490 | DEV_INFO_FLAGS); | 1496 | DEV_INFO_FLAGS); | |
1491 | #undef DEV_INFO_FLAG | 1497 | #undef DEV_INFO_FLAG | |
1492 | #undef DEV_INFO_SEP | 1498 | #undef DEV_INFO_SEP | |
1493 | } | 1499 | } | |
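The flag dump above is an X-macro: DEV_INFO_FLAGS (defined in the real
driver header) is a list of DEV_INFO_FLAG(name) entries joined by
DEV_INFO_SEP, so redefining those two helpers expands the one list into
printf arguments. A minimal standalone analogue, with invented names:

#include <stdio.h>

struct info { int is_mobile, has_llc; };

#define MY_FLAGS \
	MY_FLAG(is_mobile) MY_SEP \
	MY_FLAG(has_llc)

int main(void)
{
	struct info i = { 1, 0 };
#define MY_FLAG(name) (i.name ? #name "," : "")
#define MY_SEP ,
	printf("flags=%s%s\n", MY_FLAGS);	/* prints "flags=is_mobile," */
#undef MY_FLAG
#undef MY_SEP
	return 0;
}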
1494 | 1500 | |||
1495 | /** | 1501 | /** | |
1496 | * i915_driver_load - setup chip and create an initial config | 1502 | * i915_driver_load - setup chip and create an initial config | |
1497 | * @dev: DRM device | 1503 | * @dev: DRM device | |
1498 | * @flags: startup flags | 1504 | * @flags: startup flags | |
1499 | * | 1505 | * | |
1500 | * The driver load routine has to do several things: | 1506 | * The driver load routine has to do several things: | |
1501 | * - drive output discovery via intel_modeset_init() | 1507 | * - drive output discovery via intel_modeset_init() | |
1502 | * - initialize the memory manager | 1508 | * - initialize the memory manager | |
1503 | * - allocate initial config memory | 1509 | * - allocate initial config memory | |
1504 | * - setup the DRM framebuffer with the allocated memory | 1510 | * - setup the DRM framebuffer with the allocated memory | |
1505 | */ | 1511 | */ | |
1506 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | 1512 | int i915_driver_load(struct drm_device *dev, unsigned long flags) | |
1507 | { | 1513 | { | |
1508 | struct drm_i915_private *dev_priv; | 1514 | struct drm_i915_private *dev_priv; | |
1509 | struct intel_device_info *info; | 1515 | struct intel_device_info *info; | |
1510 | int ret = 0, mmio_bar, mmio_size; | 1516 | int ret = 0, mmio_bar, mmio_size; | |
1511 | uint32_t aperture_size; | 1517 | uint32_t aperture_size; | |
1512 | 1518 | |||
1513 | info = (struct intel_device_info *) flags; | 1519 | info = (struct intel_device_info *) flags; | |
1514 | 1520 | |||
1515 | /* Refuse to load on gen6+ without kms enabled. */ | 1521 | /* Refuse to load on gen6+ without kms enabled. */ | |
1516 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) | 1522 | if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) | |
1517 | return -ENODEV; | 1523 | return -ENODEV; | |
1518 | 1524 | |||
1519 | /* i915 has 4 more counters */ | 1525 | /* i915 has 4 more counters */ | |
1520 | dev->counters += 4; | 1526 | dev->counters += 4; | |
1521 | dev->types[6] = _DRM_STAT_IRQ; | 1527 | dev->types[6] = _DRM_STAT_IRQ; | |
1522 | dev->types[7] = _DRM_STAT_PRIMARY; | 1528 | dev->types[7] = _DRM_STAT_PRIMARY; | |
1523 | dev->types[8] = _DRM_STAT_SECONDARY; | 1529 | dev->types[8] = _DRM_STAT_SECONDARY; | |
1524 | dev->types[9] = _DRM_STAT_DMA; | 1530 | dev->types[9] = _DRM_STAT_DMA; | |
1525 | 1531 | |||
1526 | dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); | 1532 | dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); | |
1527 | if (dev_priv == NULL) | 1533 | if (dev_priv == NULL) | |
1528 | return -ENOMEM; | 1534 | return -ENOMEM; | |
1529 | 1535 | |||
1530 | dev->dev_private = (void *)dev_priv; | 1536 | dev->dev_private = (void *)dev_priv; | |
1531 | dev_priv->dev = dev; | 1537 | dev_priv->dev = dev; | |
1532 | dev_priv->info = info; | 1538 | dev_priv->info = info; | |
1533 | 1539 | |||
1534 | i915_dump_device_info(dev_priv); | 1540 | i915_dump_device_info(dev_priv); | |
1535 | 1541 | |||
1536 | if (i915_get_bridge_dev(dev)) { | 1542 | if (i915_get_bridge_dev(dev)) { | |
1537 | ret = -EIO; | 1543 | ret = -EIO; | |
1538 | goto free_priv; | 1544 | goto free_priv; | |
1539 | } | 1545 | } | |
1540 | 1546 | |||
1541 | ret = i915_gem_gtt_init(dev); | 1547 | ret = i915_gem_gtt_init(dev); | |
1542 | if (ret) | 1548 | if (ret) | |
1543 | goto put_bridge; | 1549 | goto put_bridge; | |
1544 | 1550 | |||
1545 | #ifndef __NetBSD__ /* XXX fb */ | 1551 | #ifndef __NetBSD__ /* XXX fb */ | |
1546 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 1552 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
1547 | i915_kick_out_firmware_fb(dev_priv); | 1553 | i915_kick_out_firmware_fb(dev_priv); | |
1548 | #endif | 1554 | #endif | |
1549 | 1555 | |||
1550 | pci_set_master(dev->pdev); | 1556 | pci_set_master(dev->pdev); | |
1551 | 1557 | |||
1552 | /* overlay on gen2 is broken and can't address above 1G */ | 1558 | /* overlay on gen2 is broken and can't address above 1G */ | |
1553 | if (IS_GEN2(dev)) | 1559 | if (IS_GEN2(dev)) | |
1554 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1560 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | |
1555 | 1561 | |||
1556 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | 1562 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | |
1557 | * using 32-bit addressing, overwriting memory if HWS is located | 1563 | * using 32-bit addressing, overwriting memory if HWS is located | |
1558 | * above 4GB. | 1564 | * above 4GB. | |
1559 | * | 1565 | * | |
1560 | * The documentation also mentions an issue with undefined | 1566 | * The documentation also mentions an issue with undefined | |
1561 | * behaviour if any general state is accessed within a page above 4GB, | 1567 | * behaviour if any general state is accessed within a page above 4GB, | |
1562 | * which also needs to be handled carefully. | 1568 | * which also needs to be handled carefully. | |
1563 | */ | 1569 | */ | |
1564 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | 1570 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | |
1565 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | 1571 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | |
1566 | 1572 | |||
1567 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | 1573 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | |
1568 | /* Before gen4, the registers and the GTT are behind different BARs. | 1574 | /* Before gen4, the registers and the GTT are behind different BARs. | |
1569 | * However, from gen4 onwards, the registers and the GTT are shared | 1575 | * However, from gen4 onwards, the registers and the GTT are shared | |
1570 | * in the same BAR, so we want to restrict this ioremap from | 1576 | * in the same BAR, so we want to restrict this ioremap from | |
1571 | * clobbering the GTT, for which we want ioremap_wc instead. Fortunately, | 1577 | * clobbering the GTT, for which we want ioremap_wc instead. Fortunately, | |
1572 | * the register BAR remains the same size for all the earlier | 1578 | * the register BAR remains the same size for all the earlier | |
1573 | * generations up to Ironlake. | 1579 | * generations up to Ironlake. | |
1574 | */ | 1580 | */ | |
1575 | if (info->gen < 5) | 1581 | if (info->gen < 5) | |
1576 | mmio_size = 512*1024; | 1582 | mmio_size = 512*1024; | |
1577 | else | 1583 | else | |
1578 | mmio_size = 2*1024*1024; | 1584 | mmio_size = 2*1024*1024; | |
1579 | 1585 | |||
1580 | #ifdef __NetBSD__ | 1586 | #ifdef __NetBSD__ | |
1581 | /* XXX Maybe it would be better to just use pci_mapreg_map... */ | 1587 | /* XXX Maybe it would be better to just use pci_mapreg_map... */ | |
1582 | { | 1588 | { | |
1583 | bus_addr_t addr; | 1589 | bus_addr_t addr; | |
1584 | bus_size_t size; | 1590 | bus_size_t size; | |
1585 | 1591 | |||
1586 | if (pci_mapreg_info(dev->pdev->pd_pa.pa_pc, | 1592 | if (pci_mapreg_info(dev->pdev->pd_pa.pa_pc, | |
1587 | dev->pdev->pd_pa.pa_tag, mmio_bar, PCI_MAPREG_TYPE_MEM, | 1593 | dev->pdev->pd_pa.pa_tag, mmio_bar, PCI_MAPREG_TYPE_MEM, | |
1588 | &addr, &size, NULL /* XXX flags? */)) { | 1594 | &addr, &size, NULL /* XXX flags? */)) { | |
1589 | ret = -EIO; /* XXX */ | 1595 | ret = -EIO; /* XXX */ | |
1590 | goto put_gmch; | 1596 | goto put_gmch; | |
1591 | } | 1597 | } | |
1592 | 1598 | |||
1593 | ret = drm_addmap(dev, addr, size, _DRM_REGISTERS, | 1599 | ret = drm_addmap(dev, addr, size, _DRM_REGISTERS, | |
1594 | (_DRM_KERNEL | _DRM_DRIVER), &dev_priv->regs_map); | 1600 | (_DRM_KERNEL | _DRM_DRIVER), &dev_priv->regs_map); | |
1595 | if (ret) | 1601 | if (ret) | |
1596 | goto put_gmch; | 1602 | goto put_gmch; | |
1597 | } | 1603 | } | |
1598 | #else | 1604 | #else | |
1599 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); | 1605 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); | |
1600 | if (!dev_priv->regs) { | 1606 | if (!dev_priv->regs) { | |
1601 | DRM_ERROR("failed to map registers\n"); | 1607 | DRM_ERROR("failed to map registers\n"); | |
1602 | ret = -EIO; | 1608 | ret = -EIO; | |
1603 | goto put_gmch; | 1609 | goto put_gmch; | |
1604 | } | 1610 | } | |
1605 | #endif | 1611 | #endif | |
1606 | 1612 | |||
1607 | aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | 1613 | aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; | |
1608 | dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr; | 1614 | dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr; | |
1609 | 1615 | |||
1610 | dev_priv->mm.gtt_mapping = | 1616 | dev_priv->mm.gtt_mapping = | |
1611 | io_mapping_create_wc(dev_priv->mm.gtt_base_addr, | 1617 | io_mapping_create_wc(dev_priv->mm.gtt_base_addr, | |
1612 | aperture_size); | 1618 | aperture_size); | |
1613 | if (dev_priv->mm.gtt_mapping == NULL) { | 1619 | if (dev_priv->mm.gtt_mapping == NULL) { | |
1614 | ret = -EIO; | 1620 | ret = -EIO; | |
1615 | goto out_rmmap; | 1621 | goto out_rmmap; | |
1616 | } | 1622 | } | |
1617 | 1623 | |||
1618 | i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, | 1624 | i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, | |
1619 | aperture_size); | 1625 | aperture_size); | |
1620 | 1626 | |||
1621 | /* The i915 workqueue is primarily used for batched retirement of | 1627 | /* The i915 workqueue is primarily used for batched retirement of | |
1622 | * requests (and thus managing bo) once the task has been completed | 1628 | * requests (and thus managing bo) once the task has been completed | |
1623 | * by the GPU. i915_gem_retire_requests() is called directly when we | 1629 | * by the GPU. i915_gem_retire_requests() is called directly when we | |
1624 | * need high-priority retirement, such as waiting for an explicit | 1630 | * need high-priority retirement, such as waiting for an explicit | |
1625 | * bo. | 1631 | * bo. | |
1626 | * | 1632 | * | |
1627 | * It is also used for periodic low-priority events, such as | 1633 | * It is also used for periodic low-priority events, such as | |
1628 | * idle-timers and recording error state. | 1634 | * idle-timers and recording error state. | |
1629 | * | 1635 | * | |
1630 | * All tasks on the workqueue are expected to acquire the dev mutex | 1636 | * All tasks on the workqueue are expected to acquire the dev mutex | |
1631 | * so there is no point in running more than one instance of the | 1637 | * so there is no point in running more than one instance of the | |
1632 | * workqueue at any time. Use an ordered one. | 1638 | * workqueue at any time. Use an ordered one. | |
1633 | */ | 1639 | */ | |
1634 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); | 1640 | dev_priv->wq = alloc_ordered_workqueue("i915", 0); | |
1635 | if (dev_priv->wq == NULL) { | 1641 | if (dev_priv->wq == NULL) { | |
1636 | DRM_ERROR("Failed to create our workqueue.\n"); | 1642 | DRM_ERROR("Failed to create our workqueue.\n"); | |
1637 | ret = -ENOMEM; | 1643 | ret = -ENOMEM; | |
1638 | goto out_mtrrfree; | 1644 | goto out_mtrrfree; | |
1639 | } | 1645 | } | |
1640 | 1646 | |||
1641 | /* This must be called before any calls to HAS_PCH_* */ | 1647 | /* This must be called before any calls to HAS_PCH_* */ | |
1642 | intel_detect_pch(dev); | 1648 | intel_detect_pch(dev); | |
1643 | 1649 | |||
1644 | intel_irq_init(dev); | 1650 | intel_irq_init(dev); | |
1645 | intel_gt_init(dev); | 1651 | intel_gt_init(dev); | |
1646 | 1652 | |||
1647 | /* Try to make sure MCHBAR is enabled before poking at it */ | 1653 | /* Try to make sure MCHBAR is enabled before poking at it */ | |
1648 | intel_setup_mchbar(dev); | 1654 | intel_setup_mchbar(dev); | |
1649 | intel_setup_gmbus(dev); | 1655 | intel_setup_gmbus(dev); | |
1650 | intel_opregion_setup(dev); | 1656 | intel_opregion_setup(dev); | |
1651 | 1657 | |||
1652 | intel_setup_bios(dev); | 1658 | intel_setup_bios(dev); | |
1653 | 1659 | |||
1654 | i915_gem_load(dev); | 1660 | i915_gem_load(dev); | |
1655 | 1661 | |||
1656 | /* On the 945G/GM, the chipset reports the MSI capability on the | 1662 | /* On the 945G/GM, the chipset reports the MSI capability on the | |
1657 | * integrated graphics even though the support isn't actually there | 1663 | * integrated graphics even though the support isn't actually there | |
1658 | * according to the published specs. It doesn't appear to function | 1664 | * according to the published specs. It doesn't appear to function | |
1659 | * correctly in testing on 945G. | 1665 | * correctly in testing on 945G. | |
1660 | * This may be a side effect of MSI having been made available for PEG | 1666 | * This may be a side effect of MSI having been made available for PEG | |
1661 | * and the registers being closely associated. | 1667 | * and the registers being closely associated. | |
1662 | * | 1668 | * | |
1663 | * According to chipset errata, on the 965GM, MSI interrupts may | 1669 | * According to chipset errata, on the 965GM, MSI interrupts may | |
1664 | * be lost or delayed, but we use them anyway to avoid | 1670 | * be lost or delayed, but we use them anyway to avoid | |
1665 | * stuck interrupts on some machines. | 1671 | * stuck interrupts on some machines. | |
1666 | */ | 1672 | */ | |
1667 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | 1673 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | |
1668 | pci_enable_msi(dev->pdev); | 1674 | pci_enable_msi(dev->pdev); | |
1669 | 1675 | |||
1670 | spin_lock_init(&dev_priv->irq_lock); | 1676 | spin_lock_init(&dev_priv->irq_lock); | |
1671 | spin_lock_init(&dev_priv->error_lock); | 1677 | spin_lock_init(&dev_priv->error_lock); | |
1672 | spin_lock_init(&dev_priv->rps.lock); | 1678 | spin_lock_init(&dev_priv->rps.lock); | |
1673 | spin_lock_init(&dev_priv->dpio_lock); | 1679 | spin_lock_init(&dev_priv->dpio_lock); | |
1674 | 1680 | |||
1675 | mutex_init(&dev_priv->rps.hw_lock); | 1681 | mutex_init(&dev_priv->rps.hw_lock); | |
1676 | 1682 | |||
1677 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 1683 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | |
1678 | dev_priv->num_pipe = 3; | 1684 | dev_priv->num_pipe = 3; | |
1679 | else if (IS_MOBILE(dev) || !IS_GEN2(dev)) | 1685 | else if (IS_MOBILE(dev) || !IS_GEN2(dev)) | |
1680 | dev_priv->num_pipe = 2; | 1686 | dev_priv->num_pipe = 2; | |
1681 | else | 1687 | else | |
1682 | dev_priv->num_pipe = 1; | 1688 | dev_priv->num_pipe = 1; | |
1683 | 1689 | |||
1684 | ret = drm_vblank_init(dev, dev_priv->num_pipe); | 1690 | ret = drm_vblank_init(dev, dev_priv->num_pipe); | |
1685 | if (ret) | 1691 | if (ret) | |
1686 | goto out_gem_unload; | 1692 | goto out_gem_unload; | |
1687 | 1693 | |||
1688 | /* Start out suspended */ | 1694 | /* Start out suspended */ | |
1689 | dev_priv->mm.suspended = 1; | 1695 | dev_priv->mm.suspended = 1; | |
1690 | 1696 | |||
1691 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1697 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1692 | ret = i915_load_modeset_init(dev); | 1698 | ret = i915_load_modeset_init(dev); | |
1693 | if (ret < 0) { | 1699 | if (ret < 0) { | |
1694 | DRM_ERROR("failed to init modeset\n"); | 1700 | DRM_ERROR("failed to init modeset\n"); | |
1695 | goto out_gem_unload; | 1701 | goto out_gem_unload; | |
1696 | } | 1702 | } | |
1697 | } | 1703 | } | |
1698 | 1704 | |||
1699 | i915_setup_sysfs(dev); | 1705 | i915_setup_sysfs(dev); | |
1700 | 1706 | |||
1701 | /* Must be done after probing outputs */ | 1707 | /* Must be done after probing outputs */ | |
1702 | intel_opregion_init(dev); | 1708 | intel_opregion_init(dev); | |
1703 | acpi_video_register(); | 1709 | acpi_video_register(); | |
1704 | 1710 | |||
1705 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | 1711 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, | |
1706 | (unsigned long) dev); | 1712 | (unsigned long) dev); | |
1707 | 1713 | |||
1708 | if (IS_GEN5(dev)) | 1714 | if (IS_GEN5(dev)) | |
1709 | intel_gpu_ips_init(dev_priv); | 1715 | intel_gpu_ips_init(dev_priv); | |
1710 | 1716 | |||
1711 | return 0; | 1717 | return 0; | |
1712 | 1718 | |||
1713 | out_gem_unload: | 1719 | out_gem_unload: | |
1714 | if (dev_priv->mm.inactive_shrinker.shrink) | 1720 | if (dev_priv->mm.inactive_shrinker.shrink) | |
1715 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | 1721 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | |
1716 | 1722 | |||
1717 | if (dev->pdev->msi_enabled) | 1723 | if (dev->pdev->msi_enabled) | |
1718 | pci_disable_msi(dev->pdev); | 1724 | pci_disable_msi(dev->pdev); | |
1719 | 1725 | |||
1720 | intel_teardown_gmbus(dev); | 1726 | intel_teardown_gmbus(dev); | |
1721 | intel_teardown_mchbar(dev); | 1727 | intel_teardown_mchbar(dev); | |
1722 | destroy_workqueue(dev_priv->wq); | 1728 | destroy_workqueue(dev_priv->wq); | |
1723 | out_mtrrfree: | 1729 | out_mtrrfree: | |
1724 | if (dev_priv->mm.gtt_mtrr >= 0) { | 1730 | if (dev_priv->mm.gtt_mtrr >= 0) { | |
1725 | mtrr_del(dev_priv->mm.gtt_mtrr, | 1731 | mtrr_del(dev_priv->mm.gtt_mtrr, | |
1726 | dev_priv->mm.gtt_base_addr, | 1732 | dev_priv->mm.gtt_base_addr, | |
1727 | aperture_size); | 1733 | aperture_size); | |
1728 | dev_priv->mm.gtt_mtrr = -1; | 1734 | dev_priv->mm.gtt_mtrr = -1; | |
1729 | } | 1735 | } | |
1730 | io_mapping_free(dev_priv->mm.gtt_mapping); | 1736 | io_mapping_free(dev_priv->mm.gtt_mapping); | |
1731 | out_rmmap: | 1737 | out_rmmap: | |
1732 | #ifdef __NetBSD__ | 1738 | #ifdef __NetBSD__ | |
1733 | (void)drm_rmmap(dev, dev_priv->regs_map); | 1739 | (void)drm_rmmap(dev, dev_priv->regs_map); | |
1734 | #else | 1740 | #else | |
1735 | pci_iounmap(dev->pdev, dev_priv->regs); | 1741 | pci_iounmap(dev->pdev, dev_priv->regs); | |
1736 | #endif | 1742 | #endif | |
1737 | put_gmch: | 1743 | put_gmch: | |
1738 | i915_gem_gtt_fini(dev); | 1744 | i915_gem_gtt_fini(dev); | |
1739 | put_bridge: | 1745 | put_bridge: | |
1740 | pci_dev_put(dev_priv->bridge_dev); | 1746 | pci_dev_put(dev_priv->bridge_dev); | |
1741 | free_priv: | 1747 | free_priv: | |
1742 | kfree(dev_priv); | 1748 | kfree(dev_priv); | |
1743 | return ret; | 1749 | return ret; | |
1744 | } | 1750 | } | |
1745 | 1751 | |||
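Editorial note: the error path of i915_driver_load() above unwinds in exact reverse order of setup through a ladder of labels, so each failure point jumps to the first label that undoes everything already initialized. A minimal sketch of that idiom, with hypothetical setup_a/setup_b/setup_c and teardown helpers standing in for the driver's real steps:

	/* Hypothetical helpers; each setup returns 0 on success. */
	extern int setup_a(void), setup_b(void), setup_c(void);
	extern void teardown_a(void), teardown_b(void);

	int
	load(void)
	{
		int ret;

		ret = setup_a();
		if (ret)
			return ret;
		ret = setup_b();
		if (ret)
			goto out_a;	/* undo setup_a only */
		ret = setup_c();
		if (ret)
			goto out_b;	/* undo setup_b, then setup_a */
		return 0;

	out_b:
		teardown_b();
	out_a:
		teardown_a();
		return ret;
	}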
1746 | int i915_driver_unload(struct drm_device *dev) | 1752 | int i915_driver_unload(struct drm_device *dev) | |
1747 | { | 1753 | { | |
1748 | struct drm_i915_private *dev_priv = dev->dev_private; | 1754 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1749 | int ret; | 1755 | int ret; | |
1750 | 1756 | |||
1751 | intel_gpu_ips_teardown(); | 1757 | intel_gpu_ips_teardown(); | |
1752 | 1758 | |||
1753 | i915_teardown_sysfs(dev); | 1759 | i915_teardown_sysfs(dev); | |
1754 | 1760 | |||
1755 | if (dev_priv->mm.inactive_shrinker.shrink) | 1761 | if (dev_priv->mm.inactive_shrinker.shrink) | |
1756 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | 1762 | unregister_shrinker(&dev_priv->mm.inactive_shrinker); | |
1757 | 1763 | |||
1758 | mutex_lock(&dev->struct_mutex); | 1764 | mutex_lock(&dev->struct_mutex); | |
1759 | ret = i915_gpu_idle(dev); | 1765 | ret = i915_gpu_idle(dev); | |
1760 | if (ret) | 1766 | if (ret) | |
1761 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 1767 | DRM_ERROR("failed to idle hardware: %d\n", ret); | |
1762 | i915_gem_retire_requests(dev); | 1768 | i915_gem_retire_requests(dev); | |
1763 | mutex_unlock(&dev->struct_mutex); | 1769 | mutex_unlock(&dev->struct_mutex); | |
1764 | 1770 | |||
1765 | /* Cancel the retire work handler, which should be idle now. */ | 1771 | /* Cancel the retire work handler, which should be idle now. */ | |
1766 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | 1772 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | |
1767 | 1773 | |||
1768 | io_mapping_free(dev_priv->mm.gtt_mapping); | 1774 | io_mapping_free(dev_priv->mm.gtt_mapping); | |
1769 | if (dev_priv->mm.gtt_mtrr >= 0) { | 1775 | if (dev_priv->mm.gtt_mtrr >= 0) { | |
1770 | mtrr_del(dev_priv->mm.gtt_mtrr, | 1776 | mtrr_del(dev_priv->mm.gtt_mtrr, | |
1771 | dev_priv->mm.gtt_base_addr, | 1777 | dev_priv->mm.gtt_base_addr, | |
1772 | dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE); | 1778 | dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE); | |
1773 | dev_priv->mm.gtt_mtrr = -1; | 1779 | dev_priv->mm.gtt_mtrr = -1; | |
1774 | } | 1780 | } | |
1775 | 1781 | |||
1776 | acpi_video_unregister(); | 1782 | acpi_video_unregister(); | |
1777 | 1783 | |||
1778 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 1784 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | |
1779 | #ifndef __NetBSD__ /* XXX fb */ | 1785 | #ifndef __NetBSD__ /* XXX fb */ | |
1780 | intel_fbdev_fini(dev); | 1786 | intel_fbdev_fini(dev); | |
1781 | #endif | 1787 | #endif | |
1782 | intel_modeset_cleanup(dev); | 1788 | intel_modeset_cleanup(dev); | |
1783 | cancel_work_sync(&dev_priv->console_resume_work); | 1789 | cancel_work_sync(&dev_priv->console_resume_work); | |
1784 | 1790 | |||
1785 | /* | 1791 | /* | |
1786 | * free the memory space allocated for the child device | 1792 | * free the memory space allocated for the child device | |
1787 | * config parsed from VBT | 1793 | * config parsed from VBT | |
1788 | */ | 1794 | */ | |
1789 | if (dev_priv->child_dev && dev_priv->child_dev_num) { | 1795 | if (dev_priv->child_dev && dev_priv->child_dev_num) { | |
1790 | kfree(dev_priv->child_dev); | 1796 | kfree(dev_priv->child_dev); | |
1791 | dev_priv->child_dev = NULL; | 1797 | dev_priv->child_dev = NULL; | |
1792 | dev_priv->child_dev_num = 0; | 1798 | dev_priv->child_dev_num = 0; | |
1793 | } | 1799 | } | |
1794 | 1800 | |||
1795 | #ifndef __NetBSD__ /* XXX vga */ | 1801 | #ifndef __NetBSD__ /* XXX vga */ | |
1796 | vga_switcheroo_unregister_client(dev->pdev); | 1802 | vga_switcheroo_unregister_client(dev->pdev); | |
1797 | vga_client_register(dev->pdev, NULL, NULL, NULL); | 1803 | vga_client_register(dev->pdev, NULL, NULL, NULL); | |
1798 | #endif | 1804 | #endif | |
1799 | } | 1805 | } | |
1800 | 1806 | |||
1801 | /* Free error state after interrupts are fully disabled. */ | 1807 | /* Free error state after interrupts are fully disabled. */ | |
1802 | del_timer_sync(&dev_priv->hangcheck_timer); | 1808 | del_timer_sync(&dev_priv->hangcheck_timer); | |
1803 | cancel_work_sync(&dev_priv->error_work); | 1809 | cancel_work_sync(&dev_priv->error_work); | |
1804 | i915_destroy_error_state(dev); | 1810 | i915_destroy_error_state(dev); | |
1805 | 1811 |
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c 2013/07/23 21:28:22 1.1.1.1.2.2
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c 2013/07/24 03:06:00 1.1.1.1.2.3
@@ -32,2006 +32,2016 @@ | @@ -32,2006 +32,2016 @@ | |||
32 | #include "intel_drv.h" | 32 | #include "intel_drv.h" | |
33 | #include <linux/shmem_fs.h> | 33 | #include <linux/shmem_fs.h> | |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> | |
35 | #include <linux/swap.h> | 35 | #include <linux/swap.h> | |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> | |
37 | #include <linux/dma-buf.h> | 37 | #include <linux/dma-buf.h> | |
38 | 38 | |||
39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); | 39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); | |
40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); | 40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); | |
41 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | 41 | static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |
42 | unsigned alignment, | 42 | unsigned alignment, | |
43 | bool map_and_fenceable, | 43 | bool map_and_fenceable, | |
44 | bool nonblocking); | 44 | bool nonblocking); | |
45 | static int i915_gem_phys_pwrite(struct drm_device *dev, | 45 | static int i915_gem_phys_pwrite(struct drm_device *dev, | |
46 | struct drm_i915_gem_object *obj, | 46 | struct drm_i915_gem_object *obj, | |
47 | struct drm_i915_gem_pwrite *args, | 47 | struct drm_i915_gem_pwrite *args, | |
48 | struct drm_file *file); | 48 | struct drm_file *file); | |
49 | 49 | |||
50 | static void i915_gem_write_fence(struct drm_device *dev, int reg, | 50 | static void i915_gem_write_fence(struct drm_device *dev, int reg, | |
51 | struct drm_i915_gem_object *obj); | 51 | struct drm_i915_gem_object *obj); | |
52 | static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | 52 | static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | |
53 | struct drm_i915_fence_reg *fence, | 53 | struct drm_i915_fence_reg *fence, | |
54 | bool enable); | 54 | bool enable); | |
55 | 55 | |||
56 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, | 56 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, | |
57 | struct shrink_control *sc); | 57 | struct shrink_control *sc); | |
58 | static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); | 58 | static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); | |
59 | static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); | 59 | static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); | |
60 | static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); | 60 | static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); | |
61 | 61 | |||
62 | static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) | 62 | static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) | |
63 | { | 63 | { | |
64 | if (obj->tiling_mode) | 64 | if (obj->tiling_mode) | |
65 | i915_gem_release_mmap(obj); | 65 | i915_gem_release_mmap(obj); | |
66 | 66 | |||
67 | /* As we do not have an associated fence register, we will force | 67 | /* As we do not have an associated fence register, we will force | |
68 | * a tiling change if we ever need to acquire one. | 68 | * a tiling change if we ever need to acquire one. | |
69 | */ | 69 | */ | |
70 | obj->fence_dirty = false; | 70 | obj->fence_dirty = false; | |
71 | obj->fence_reg = I915_FENCE_REG_NONE; | 71 | obj->fence_reg = I915_FENCE_REG_NONE; | |
72 | } | 72 | } | |
73 | 73 | |||
74 | /* some bookkeeping */ | 74 | /* some bookkeeping */ | |
75 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, | 75 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, | |
76 | size_t size) | 76 | size_t size) | |
77 | { | 77 | { | |
78 | dev_priv->mm.object_count++; | 78 | dev_priv->mm.object_count++; | |
79 | dev_priv->mm.object_memory += size; | 79 | dev_priv->mm.object_memory += size; | |
80 | } | 80 | } | |
81 | 81 | |||
82 | static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, | 82 | static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, | |
83 | size_t size) | 83 | size_t size) | |
84 | { | 84 | { | |
85 | dev_priv->mm.object_count--; | 85 | dev_priv->mm.object_count--; | |
86 | dev_priv->mm.object_memory -= size; | 86 | dev_priv->mm.object_memory -= size; | |
87 | } | 87 | } | |
88 | 88 | |||
89 | static int | 89 | static int | |
90 | i915_gem_wait_for_error(struct drm_device *dev) | 90 | i915_gem_wait_for_error(struct drm_device *dev) | |
91 | { | 91 | { | |
92 | struct drm_i915_private *dev_priv = dev->dev_private; | 92 | struct drm_i915_private *dev_priv = dev->dev_private; | |
93 | struct completion *x = &dev_priv->error_completion; | 93 | struct completion *x = &dev_priv->error_completion; | |
94 | unsigned long flags; | 94 | unsigned long flags; | |
95 | int ret; | 95 | int ret; | |
96 | 96 | |||
97 | if (!atomic_read(&dev_priv->mm.wedged)) | 97 | if (!atomic_read(&dev_priv->mm.wedged)) | |
98 | return 0; | 98 | return 0; | |
99 | 99 | |||
100 | /* | 100 | /* | |
101 | * Only wait 10 seconds for the gpu reset to complete to avoid hanging | 101 | * Only wait 10 seconds for the gpu reset to complete to avoid hanging | |
102 | * userspace. If it takes that long, something really bad is going on and | 102 | * userspace. If it takes that long, something really bad is going on and | |
103 | * we should simply try to bail out and fail as gracefully as possible. | 103 | * we should simply try to bail out and fail as gracefully as possible. | |
104 | */ | 104 | */ | |
105 | ret = wait_for_completion_interruptible_timeout(x, 10*HZ); | 105 | ret = wait_for_completion_interruptible_timeout(x, 10*HZ); | |
106 | if (ret == 0) { | 106 | if (ret == 0) { | |
107 | DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); | 107 | DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); | |
108 | return -EIO; | 108 | return -EIO; | |
109 | } else if (ret < 0) { | 109 | } else if (ret < 0) { | |
110 | return ret; | 110 | return ret; | |
111 | } | 111 | } | |
112 | 112 | |||
113 | if (atomic_read(&dev_priv->mm.wedged)) { | 113 | if (atomic_read(&dev_priv->mm.wedged)) { | |
114 | /* GPU is hung, bump the completion count to account for | 114 | /* GPU is hung, bump the completion count to account for | |
115 | * the token we just consumed so that we never hit zero and | 115 | * the token we just consumed so that we never hit zero and | |
116 | * end up waiting upon a subsequent completion event that | 116 | * end up waiting upon a subsequent completion event that | |
117 | * will never happen. | 117 | * will never happen. | |
118 | */ | 118 | */ | |
119 | spin_lock_irqsave(&x->wait.lock, flags); | 119 | spin_lock_irqsave(&x->wait.lock, flags); | |
120 | x->done++; | 120 | x->done++; | |
121 | spin_unlock_irqrestore(&x->wait.lock, flags); | 121 | spin_unlock_irqrestore(&x->wait.lock, flags); | |
122 | } | 122 | } | |
123 | return 0; | 123 | return 0; | |
124 | } | 124 | } | |
125 | 125 | |||
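Editorial note: a completion is a counting primitive, so each complete() posts a token to done and each successful wait consumes one. The wedged branch above hands back the token it just consumed: with the GPU still hung, no new completion will be posted, and without the bump a later waiter would hit zero and sleep forever. A conceptual model only, not the kernel's struct completion:

	/* Toy model: the real struct completion pairs this counter with a
	 * waitqueue and protects it with x->wait.lock. */
	struct toy_completion { unsigned int done; };

	static void toy_complete(struct toy_completion *x)
	{
		x->done++;		/* post one token */
	}

	static int toy_try_wait(struct toy_completion *x)
	{
		if (x->done == 0)
			return 0;	/* would block */
		x->done--;		/* consume one token */
		return 1;
	}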
126 | int i915_mutex_lock_interruptible(struct drm_device *dev) | 126 | int i915_mutex_lock_interruptible(struct drm_device *dev) | |
127 | { | 127 | { | |
128 | int ret; | 128 | int ret; | |
129 | 129 | |||
130 | ret = i915_gem_wait_for_error(dev); | 130 | ret = i915_gem_wait_for_error(dev); | |
131 | if (ret) | 131 | if (ret) | |
132 | return ret; | 132 | return ret; | |
133 | 133 | |||
134 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 134 | ret = mutex_lock_interruptible(&dev->struct_mutex); | |
135 | if (ret) | 135 | if (ret) | |
136 | return ret; | 136 | return ret; | |
137 | 137 | |||
138 | WARN_ON(i915_verify_lists(dev)); | 138 | WARN_ON(i915_verify_lists(dev)); | |
139 | return 0; | 139 | return 0; | |
140 | } | 140 | } | |
141 | 141 | |||
142 | static inline bool | 142 | static inline bool | |
143 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) | 143 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) | |
144 | { | 144 | { | |
145 | return obj->gtt_space && !obj->active; | 145 | return obj->gtt_space && !obj->active; | |
146 | } | 146 | } | |
147 | 147 | |||
148 | int | 148 | int | |
149 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | 149 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | |
150 | struct drm_file *file) | 150 | struct drm_file *file) | |
151 | { | 151 | { | |
152 | struct drm_i915_gem_init *args = data; | 152 | struct drm_i915_gem_init *args = data; | |
153 | 153 | |||
154 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 154 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
155 | return -ENODEV; | 155 | return -ENODEV; | |
156 | 156 | |||
157 | if (args->gtt_start >= args->gtt_end || | 157 | if (args->gtt_start >= args->gtt_end || | |
158 | (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) | 158 | (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) | |
159 | return -EINVAL; | 159 | return -EINVAL; | |
160 | 160 | |||
161 | /* GEM with user mode setting was never supported on ilk and later. */ | 161 | /* GEM with user mode setting was never supported on ilk and later. */ | |
162 | if (INTEL_INFO(dev)->gen >= 5) | 162 | if (INTEL_INFO(dev)->gen >= 5) | |
163 | return -ENODEV; | 163 | return -ENODEV; | |
164 | 164 | |||
165 | mutex_lock(&dev->struct_mutex); | 165 | mutex_lock(&dev->struct_mutex); | |
166 | i915_gem_init_global_gtt(dev, args->gtt_start, | 166 | i915_gem_init_global_gtt(dev, args->gtt_start, | |
167 | args->gtt_end, args->gtt_end); | 167 | args->gtt_end, args->gtt_end); | |
168 | mutex_unlock(&dev->struct_mutex); | 168 | mutex_unlock(&dev->struct_mutex); | |
169 | 169 | |||
170 | return 0; | 170 | return 0; | |
171 | } | 171 | } | |
172 | 172 | |||
173 | int | 173 | int | |
174 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 174 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |
175 | struct drm_file *file) | 175 | struct drm_file *file) | |
176 | { | 176 | { | |
177 | struct drm_i915_private *dev_priv = dev->dev_private; | 177 | struct drm_i915_private *dev_priv = dev->dev_private; | |
178 | struct drm_i915_gem_get_aperture *args = data; | 178 | struct drm_i915_gem_get_aperture *args = data; | |
179 | struct drm_i915_gem_object *obj; | 179 | struct drm_i915_gem_object *obj; | |
180 | size_t pinned; | 180 | size_t pinned; | |
181 | 181 | |||
182 | pinned = 0; | 182 | pinned = 0; | |
183 | mutex_lock(&dev->struct_mutex); | 183 | mutex_lock(&dev->struct_mutex); | |
184 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) | 184 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) | |
185 | if (obj->pin_count) | 185 | if (obj->pin_count) | |
186 | pinned += obj->gtt_space->size; | 186 | pinned += obj->gtt_space->size; | |
187 | mutex_unlock(&dev->struct_mutex); | 187 | mutex_unlock(&dev->struct_mutex); | |
188 | 188 | |||
189 | args->aper_size = dev_priv->mm.gtt_total; | 189 | args->aper_size = dev_priv->mm.gtt_total; | |
190 | args->aper_available_size = args->aper_size - pinned; | 190 | args->aper_available_size = args->aper_size - pinned; | |
191 | 191 | |||
192 | return 0; | 192 | return 0; | |
193 | } | 193 | } | |
194 | 194 | |||
195 | static int | 195 | static int | |
196 | i915_gem_create(struct drm_file *file, | 196 | i915_gem_create(struct drm_file *file, | |
197 | struct drm_device *dev, | 197 | struct drm_device *dev, | |
198 | uint64_t size, | 198 | uint64_t size, | |
199 | uint32_t *handle_p) | 199 | uint32_t *handle_p) | |
200 | { | 200 | { | |
201 | struct drm_i915_gem_object *obj; | 201 | struct drm_i915_gem_object *obj; | |
202 | int ret; | 202 | int ret; | |
203 | u32 handle; | 203 | u32 handle; | |
204 | 204 | |||
205 | size = roundup(size, PAGE_SIZE); | 205 | size = roundup(size, PAGE_SIZE); | |
206 | if (size == 0) | 206 | if (size == 0) | |
207 | return -EINVAL; | 207 | return -EINVAL; | |
208 | 208 | |||
209 | /* Allocate the new object */ | 209 | /* Allocate the new object */ | |
210 | obj = i915_gem_alloc_object(dev, size); | 210 | obj = i915_gem_alloc_object(dev, size); | |
211 | if (obj == NULL) | 211 | if (obj == NULL) | |
212 | return -ENOMEM; | 212 | return -ENOMEM; | |
213 | 213 | |||
214 | ret = drm_gem_handle_create(file, &obj->base, &handle); | 214 | ret = drm_gem_handle_create(file, &obj->base, &handle); | |
215 | if (ret) { | 215 | if (ret) { | |
216 | drm_gem_object_release(&obj->base); | 216 | drm_gem_object_release(&obj->base); | |
217 | i915_gem_info_remove_obj(dev->dev_private, obj->base.size); | 217 | i915_gem_info_remove_obj(dev->dev_private, obj->base.size); | |
218 | kfree(obj); | 218 | kfree(obj); | |
219 | return ret; | 219 | return ret; | |
220 | } | 220 | } | |
221 | 221 | |||
222 | /* drop reference from allocate - handle holds it now */ | 222 | /* drop reference from allocate - handle holds it now */ | |
223 | drm_gem_object_unreference(&obj->base); | 223 | drm_gem_object_unreference(&obj->base); | |
224 | trace_i915_gem_object_create(obj); | 224 | trace_i915_gem_object_create(obj); | |
225 | 225 | |||
226 | *handle_p = handle; | 226 | *handle_p = handle; | |
227 | return 0; | 227 | return 0; | |
228 | } | 228 | } | |
229 | 229 | |||
230 | int | 230 | int | |
231 | i915_gem_dumb_create(struct drm_file *file, | 231 | i915_gem_dumb_create(struct drm_file *file, | |
232 | struct drm_device *dev, | 232 | struct drm_device *dev, | |
233 | struct drm_mode_create_dumb *args) | 233 | struct drm_mode_create_dumb *args) | |
234 | { | 234 | { | |
235 | /* have to work out size/pitch and return them */ | 235 | /* have to work out size/pitch and return them */ | |
236 | args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); | 236 | args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); | |
237 | args->size = args->pitch * args->height; | 237 | args->size = args->pitch * args->height; | |
238 | return i915_gem_create(file, dev, | 238 | return i915_gem_create(file, dev, | |
239 | args->size, &args->handle); | 239 | args->size, &args->handle); | |
240 | } | 240 | } | |
241 | 241 | |||
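Editorial note: i915_gem_dumb_create() derives the pitch by rounding bits-per-pixel up to whole bytes and aligning each row to 64 bytes, then multiplies by the height; i915_gem_create() later rounds the total up to a page. A worked example under assumed parameters (1024x768 at 32 bpp, 4096-byte pages):

	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int width = 1024, height = 768, bpp = 32;
		unsigned int cpp = (bpp + 7) / 8;		  /* 4 bytes per pixel */
		unsigned int pitch = ALIGN_UP(width * cpp, 64u);  /* 4096, already aligned */
		unsigned long size = (unsigned long)pitch * height;

		/* prints: pitch=4096 size=3145728 pages=768 */
		printf("pitch=%u size=%lu pages=%lu\n", pitch, size, size / 4096);
		return 0;
	}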
242 | int i915_gem_dumb_destroy(struct drm_file *file, | 242 | int i915_gem_dumb_destroy(struct drm_file *file, | |
243 | struct drm_device *dev, | 243 | struct drm_device *dev, | |
244 | uint32_t handle) | 244 | uint32_t handle) | |
245 | { | 245 | { | |
246 | return drm_gem_handle_delete(file, handle); | 246 | return drm_gem_handle_delete(file, handle); | |
247 | } | 247 | } | |
248 | 248 | |||
249 | /** | 249 | /** | |
250 | * Creates a new mm object and returns a handle to it. | 250 | * Creates a new mm object and returns a handle to it. | |
251 | */ | 251 | */ | |
252 | int | 252 | int | |
253 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | 253 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | |
254 | struct drm_file *file) | 254 | struct drm_file *file) | |
255 | { | 255 | { | |
256 | struct drm_i915_gem_create *args = data; | 256 | struct drm_i915_gem_create *args = data; | |
257 | 257 | |||
258 | return i915_gem_create(file, dev, | 258 | return i915_gem_create(file, dev, | |
259 | args->size, &args->handle); | 259 | args->size, &args->handle); | |
260 | } | 260 | } | |
261 | 261 | |||
262 | static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) | 262 | static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) | |
263 | { | 263 | { | |
264 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; | 264 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; | |
265 | 265 | |||
266 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 266 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | |
267 | obj->tiling_mode != I915_TILING_NONE; | 267 | obj->tiling_mode != I915_TILING_NONE; | |
268 | } | 268 | } | |
269 | 269 | |||
270 | static inline int | 270 | static inline int | |
271 | __copy_to_user_swizzled(char __user *cpu_vaddr, | 271 | __copy_to_user_swizzled(char __user *cpu_vaddr, | |
272 | const char *gpu_vaddr, int gpu_offset, | 272 | const char *gpu_vaddr, int gpu_offset, | |
273 | int length) | 273 | int length) | |
274 | { | 274 | { | |
275 | int ret, cpu_offset = 0; | 275 | int ret, cpu_offset = 0; | |
276 | 276 | |||
277 | while (length > 0) { | 277 | while (length > 0) { | |
278 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | 278 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | |
279 | int this_length = min(cacheline_end - gpu_offset, length); | 279 | int this_length = min(cacheline_end - gpu_offset, length); | |
280 | int swizzled_gpu_offset = gpu_offset ^ 64; | 280 | int swizzled_gpu_offset = gpu_offset ^ 64; | |
281 | 281 | |||
282 | ret = __copy_to_user(cpu_vaddr + cpu_offset, | 282 | ret = __copy_to_user(cpu_vaddr + cpu_offset, | |
283 | gpu_vaddr + swizzled_gpu_offset, | 283 | gpu_vaddr + swizzled_gpu_offset, | |
284 | this_length); | 284 | this_length); | |
285 | if (ret) | 285 | if (ret) | |
286 | return ret + length; | 286 | return ret + length; | |
287 | 287 | |||
288 | cpu_offset += this_length; | 288 | cpu_offset += this_length; | |
289 | gpu_offset += this_length; | 289 | gpu_offset += this_length; | |
290 | length -= this_length; | 290 | length -= this_length; | |
291 | } | 291 | } | |
292 | 292 | |||
293 | return 0; | 293 | return 0; | |
294 | } | 294 | } | |
295 | 295 | |||
296 | static inline int | 296 | static inline int | |
297 | __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, | 297 | __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, | |
298 | const char __user *cpu_vaddr, | 298 | const char __user *cpu_vaddr, | |
299 | int length) | 299 | int length) | |
300 | { | 300 | { | |
301 | int ret, cpu_offset = 0; | 301 | int ret, cpu_offset = 0; | |
302 | 302 | |||
303 | while (length > 0) { | 303 | while (length > 0) { | |
304 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | 304 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | |
305 | int this_length = min(cacheline_end - gpu_offset, length); | 305 | int this_length = min(cacheline_end - gpu_offset, length); | |
306 | int swizzled_gpu_offset = gpu_offset ^ 64; | 306 | int swizzled_gpu_offset = gpu_offset ^ 64; | |
307 | 307 | |||
308 | ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, | 308 | ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, | |
309 | cpu_vaddr + cpu_offset, | 309 | cpu_vaddr + cpu_offset, | |
310 | this_length); | 310 | this_length); | |
311 | if (ret) | 311 | if (ret) | |
312 | return ret + length; | 312 | return ret + length; | |
313 | 313 | |||
314 | cpu_offset += this_length; | 314 | cpu_offset += this_length; | |
315 | gpu_offset += this_length; | 315 | gpu_offset += this_length; | |
316 | length -= this_length; | 316 | length -= this_length; | |
317 | } | 317 | } | |
318 | 318 | |||
319 | return 0; | 319 | return 0; | |
320 | } | 320 | } | |
321 | 321 | |||
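Editorial note: both swizzled copy helpers above walk the buffer one 64-byte cacheline at a time (ALIGN(gpu_offset + 1, 64) finds the end of the current line) and XOR the GPU-side offset with 64, swapping each cacheline with its partner in the same 128-byte pair. That is the CPU-visible effect of bit-17 swizzling. A standalone illustration of the address transform:

	#include <stdio.h>

	int main(void)
	{
		/* Each 64-byte cacheline swaps with its partner in the same
		 * 128-byte pair: 0<->64, 128<->192, and so on. */
		for (unsigned int off = 0; off < 256; off += 64)
			printf("%3u -> %3u\n", off, off ^ 64);
		return 0;
	}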
322 | /* Per-page copy function for the shmem pread fastpath. | 322 | /* Per-page copy function for the shmem pread fastpath. | |
323 | * Flushes invalid cachelines before reading the target if | 323 | * Flushes invalid cachelines before reading the target if | |
324 | * needs_clflush is set. */ | 324 | * needs_clflush is set. */ | |
325 | static int | 325 | static int | |
326 | shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, | 326 | shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, | |
327 | char __user *user_data, | 327 | char __user *user_data, | |
328 | bool page_do_bit17_swizzling, bool needs_clflush) | 328 | bool page_do_bit17_swizzling, bool needs_clflush) | |
329 | { | 329 | { | |
330 | char *vaddr; | 330 | char *vaddr; | |
331 | int ret; | 331 | int ret; | |
332 | 332 | |||
333 | if (unlikely(page_do_bit17_swizzling)) | 333 | if (unlikely(page_do_bit17_swizzling)) | |
334 | return -EINVAL; | 334 | return -EINVAL; | |
335 | 335 | |||
336 | vaddr = kmap_atomic(page); | 336 | vaddr = kmap_atomic(page); | |
337 | if (needs_clflush) | 337 | if (needs_clflush) | |
338 | drm_clflush_virt_range(vaddr + shmem_page_offset, | 338 | drm_clflush_virt_range(vaddr + shmem_page_offset, | |
339 | page_length); | 339 | page_length); | |
340 | ret = __copy_to_user_inatomic(user_data, | 340 | ret = __copy_to_user_inatomic(user_data, | |
341 | vaddr + shmem_page_offset, | 341 | vaddr + shmem_page_offset, | |
342 | page_length); | 342 | page_length); | |
343 | kunmap_atomic(vaddr); | 343 | kunmap_atomic(vaddr); | |
344 | 344 | |||
345 | return ret ? -EFAULT : 0; | 345 | return ret ? -EFAULT : 0; | |
346 | } | 346 | } | |
347 | 347 | |||
348 | static void | 348 | static void | |
349 | shmem_clflush_swizzled_range(char *addr, unsigned long length, | 349 | shmem_clflush_swizzled_range(char *addr, unsigned long length, | |
350 | bool swizzled) | 350 | bool swizzled) | |
351 | { | 351 | { | |
352 | if (unlikely(swizzled)) { | 352 | if (unlikely(swizzled)) { | |
353 | unsigned long start = (unsigned long) addr; | 353 | unsigned long start = (unsigned long) addr; | |
354 | unsigned long end = (unsigned long) addr + length; | 354 | unsigned long end = (unsigned long) addr + length; | |
355 | 355 | |||
356 | /* For swizzling simply ensure that we always flush both | 356 | /* For swizzling simply ensure that we always flush both | |
357 | * channels. Lame, but simple and it works. Swizzled | 357 | * channels. Lame, but simple and it works. Swizzled | |
358 | * pwrite/pread is far from a hotpath - current userspace | 358 | * pwrite/pread is far from a hotpath - current userspace | |
359 | * doesn't use it at all. */ | 359 | * doesn't use it at all. */ | |
360 | start = round_down(start, 128); | 360 | start = round_down(start, 128); | |
361 | end = round_up(end, 128); | 361 | end = round_up(end, 128); | |
362 | 362 | |||
363 | drm_clflush_virt_range((void *)start, end - start); | 363 | drm_clflush_virt_range((void *)start, end - start); | |
364 | } else { | 364 | } else { | |
365 | drm_clflush_virt_range(addr, length); | 365 | drm_clflush_virt_range(addr, length); | |
366 | } | 366 | } | |
367 | 367 | |||
368 | } | 368 | } | |
369 | 369 | |||
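Editorial note: in the swizzled case the helper cannot tell which of the two cachelines in a 128-byte pair actually holds the caller's bytes, so it widens the flush to whole pairs: round the start down and the end up to multiples of 128, which always covers a line and its XOR-64 partner. The rounding, with hypothetical values:

	/* Flushing 16 bytes at offset 200 widens to the range [128, 256). */
	unsigned long start = 200UL & ~127UL;			/* round_down(200, 128) == 128 */
	unsigned long end   = (200UL + 16 + 127) & ~127UL;	/* round_up(216, 128) == 256 */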
370 | /* Only difference to the fast-path function is that this can handle bit17 | 370 | /* Only difference to the fast-path function is that this can handle bit17 | |
371 | * and uses non-atomic copy and kmap functions. */ | 371 | * and uses non-atomic copy and kmap functions. */ | |
372 | static int | 372 | static int | |
373 | shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, | 373 | shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, | |
374 | char __user *user_data, | 374 | char __user *user_data, | |
375 | bool page_do_bit17_swizzling, bool needs_clflush) | 375 | bool page_do_bit17_swizzling, bool needs_clflush) | |
376 | { | 376 | { | |
377 | char *vaddr; | 377 | char *vaddr; | |
378 | int ret; | 378 | int ret; | |
379 | 379 | |||
380 | vaddr = kmap(page); | 380 | vaddr = kmap(page); | |
381 | if (needs_clflush) | 381 | if (needs_clflush) | |
382 | shmem_clflush_swizzled_range(vaddr + shmem_page_offset, | 382 | shmem_clflush_swizzled_range(vaddr + shmem_page_offset, | |
383 | page_length, | 383 | page_length, | |
384 | page_do_bit17_swizzling); | 384 | page_do_bit17_swizzling); | |
385 | 385 | |||
386 | if (page_do_bit17_swizzling) | 386 | if (page_do_bit17_swizzling) | |
387 | ret = __copy_to_user_swizzled(user_data, | 387 | ret = __copy_to_user_swizzled(user_data, | |
388 | vaddr, shmem_page_offset, | 388 | vaddr, shmem_page_offset, | |
389 | page_length); | 389 | page_length); | |
390 | else | 390 | else | |
391 | ret = __copy_to_user(user_data, | 391 | ret = __copy_to_user(user_data, | |
392 | vaddr + shmem_page_offset, | 392 | vaddr + shmem_page_offset, | |
393 | page_length); | 393 | page_length); | |
394 | kunmap(page); | 394 | kunmap(page); | |
395 | 395 | |||
396 | return ret ? -EFAULT : 0; | 396 | return ret ? -EFAULT : 0; | |
397 | } | 397 | } | |
398 | 398 | |||
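Editorial note: the fast/slow pair above is the usual two-tier copy pattern, which i915_gem_shmem_pread() below drives: try a kmap_atomic plus no-fault copy while struct_mutex is held, and only on failure drop the mutex, prefault the user pages, and retry with the sleeping kmap-based copy. A hedged sketch with hypothetical stand-ins (copy_nofault, copy_sleeping, prefault_writeable, lock/unlock are not real kernel calls):

	extern int copy_nofault(void *dst, const void *src, unsigned int len);
	extern int copy_sleeping(void *dst, const void *src, unsigned int len);
	extern void prefault_writeable(void *dst, unsigned int len);
	extern void lock(void), unlock(void);

	static int
	two_tier_copy(void *dst, const void *src, unsigned int len)
	{
		int ret;

		ret = copy_nofault(dst, src, len);	/* atomic context, may fail */
		if (ret == 0)
			return 0;			/* fast path succeeded */
		unlock();				/* sleeping and faulting legal now */
		prefault_writeable(dst, len);		/* best effort, errors ignored */
		ret = copy_sleeping(dst, src, len);	/* can fault pages in */
		lock();					/* retake before touching state */
		return ret;
	}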
399 | static int | 399 | static int | |
400 | i915_gem_shmem_pread(struct drm_device *dev, | 400 | i915_gem_shmem_pread(struct drm_device *dev, | |
401 | struct drm_i915_gem_object *obj, | 401 | struct drm_i915_gem_object *obj, | |
402 | struct drm_i915_gem_pread *args, | 402 | struct drm_i915_gem_pread *args, | |
403 | struct drm_file *file) | 403 | struct drm_file *file) | |
404 | { | 404 | { | |
405 | char __user *user_data; | 405 | char __user *user_data; | |
406 | ssize_t remain; | 406 | ssize_t remain; | |
407 | loff_t offset; | 407 | loff_t offset; | |
408 | int shmem_page_offset, page_length, ret = 0; | 408 | int shmem_page_offset, page_length, ret = 0; | |
409 | int obj_do_bit17_swizzling, page_do_bit17_swizzling; | 409 | int obj_do_bit17_swizzling, page_do_bit17_swizzling; | |
410 | int hit_slowpath = 0; | 410 | int hit_slowpath = 0; | |
411 | int prefaulted = 0; | 411 | int prefaulted = 0; | |
412 | int needs_clflush = 0; | 412 | int needs_clflush = 0; | |
413 | struct scatterlist *sg; | 413 | struct scatterlist *sg; | |
414 | int i; | 414 | int i; | |
415 | 415 | |||
416 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 416 | user_data = (char __user *) (uintptr_t) args->data_ptr; | |
417 | remain = args->size; | 417 | remain = args->size; | |
418 | 418 | |||
419 | obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 419 | obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | |
420 | 420 | |||
421 | if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { | 421 | if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { | |
422 | /* If we're not in the cpu read domain, set ourselves into the gtt | 422 | /* If we're not in the cpu read domain, set ourselves into the gtt | |
423 | * read domain and manually flush cachelines (if required). This | 423 | * read domain and manually flush cachelines (if required). This | |
424 | * optimizes for the case when the gpu will dirty the data | 424 | * optimizes for the case when the gpu will dirty the data | |
425 | * again anyway before the next pread happens. */ | 425 | * again anyway before the next pread happens. */ | |
426 | if (obj->cache_level == I915_CACHE_NONE) | 426 | if (obj->cache_level == I915_CACHE_NONE) | |
427 | needs_clflush = 1; | 427 | needs_clflush = 1; | |
428 | if (obj->gtt_space) { | 428 | if (obj->gtt_space) { | |
429 | ret = i915_gem_object_set_to_gtt_domain(obj, false); | 429 | ret = i915_gem_object_set_to_gtt_domain(obj, false); | |
430 | if (ret) | 430 | if (ret) | |
431 | return ret; | 431 | return ret; | |
432 | } | 432 | } | |
433 | } | 433 | } | |
434 | 434 | |||
435 | ret = i915_gem_object_get_pages(obj); | 435 | ret = i915_gem_object_get_pages(obj); | |
436 | if (ret) | 436 | if (ret) | |
437 | return ret; | 437 | return ret; | |
438 | 438 | |||
439 | i915_gem_object_pin_pages(obj); | 439 | i915_gem_object_pin_pages(obj); | |
440 | 440 | |||
441 | offset = args->offset; | 441 | offset = args->offset; | |
442 | 442 | |||
443 | for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { | 443 | for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { | |
444 | struct page *page; | 444 | struct page *page; | |
445 | 445 | |||
446 | if (i < offset >> PAGE_SHIFT) | 446 | if (i < offset >> PAGE_SHIFT) | |
447 | continue; | 447 | continue; | |
448 | 448 | |||
449 | if (remain <= 0) | 449 | if (remain <= 0) | |
450 | break; | 450 | break; | |
451 | 451 | |||
452 | /* Operation in this page | 452 | /* Operation in this page | |
453 | * | 453 | * | |
454 | * shmem_page_offset = offset within page in shmem file | 454 | * shmem_page_offset = offset within page in shmem file | |
455 | * page_length = bytes to copy for this page | 455 | * page_length = bytes to copy for this page | |
456 | */ | 456 | */ | |
457 | shmem_page_offset = offset_in_page(offset); | 457 | shmem_page_offset = offset_in_page(offset); | |
458 | page_length = remain; | 458 | page_length = remain; | |
459 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | 459 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | |
460 | page_length = PAGE_SIZE - shmem_page_offset; | 460 | page_length = PAGE_SIZE - shmem_page_offset; | |
461 | 461 | |||
462 | page = sg_page(sg); | 462 | page = sg_page(sg); | |
463 | page_do_bit17_swizzling = obj_do_bit17_swizzling && | 463 | page_do_bit17_swizzling = obj_do_bit17_swizzling && | |
464 | (page_to_phys(page) & (1 << 17)) != 0; | 464 | (page_to_phys(page) & (1 << 17)) != 0; | |
465 | 465 | |||
466 | ret = shmem_pread_fast(page, shmem_page_offset, page_length, | 466 | ret = shmem_pread_fast(page, shmem_page_offset, page_length, | |
467 | user_data, page_do_bit17_swizzling, | 467 | user_data, page_do_bit17_swizzling, | |
468 | needs_clflush); | 468 | needs_clflush); | |
469 | if (ret == 0) | 469 | if (ret == 0) | |
470 | goto next_page; | 470 | goto next_page; | |
471 | 471 | |||
472 | hit_slowpath = 1; | 472 | hit_slowpath = 1; | |
473 | mutex_unlock(&dev->struct_mutex); | 473 | mutex_unlock(&dev->struct_mutex); | |
474 | 474 | |||
475 | if (!prefaulted) { | 475 | if (!prefaulted) { | |
476 | ret = fault_in_multipages_writeable(user_data, remain); | 476 | ret = fault_in_multipages_writeable(user_data, remain); | |
477 | /* Userspace is tricking us, but we've already clobbered | 477 | /* Userspace is tricking us, but we've already clobbered | |
478 | * its pages with the prefault and promised to write the | 478 | * its pages with the prefault and promised to write the | |
479 | * data up to the first fault. Hence ignore any errors | 479 | * data up to the first fault. Hence ignore any errors | |
480 | * and just continue. */ | 480 | * and just continue. */ | |
481 | (void)ret; | 481 | (void)ret; | |
482 | prefaulted = 1; | 482 | prefaulted = 1; | |
483 | } | 483 | } | |
484 | 484 | |||
485 | ret = shmem_pread_slow(page, shmem_page_offset, page_length, | 485 | ret = shmem_pread_slow(page, shmem_page_offset, page_length, | |
486 | user_data, page_do_bit17_swizzling, | 486 | user_data, page_do_bit17_swizzling, | |
487 | needs_clflush); | 487 | needs_clflush); | |
488 | 488 | |||
489 | mutex_lock(&dev->struct_mutex); | 489 | mutex_lock(&dev->struct_mutex); | |
490 | 490 | |||
491 | next_page: | 491 | next_page: | |
492 | mark_page_accessed(page); | 492 | mark_page_accessed(page); | |
493 | 493 | |||
494 | if (ret) | 494 | if (ret) | |
495 | goto out; | 495 | goto out; | |
496 | 496 | |||
497 | remain -= page_length; | 497 | remain -= page_length; | |
498 | user_data += page_length; | 498 | user_data += page_length; | |
499 | offset += page_length; | 499 | offset += page_length; | |
500 | } | 500 | } | |
501 | 501 | |||
502 | out: | 502 | out: | |
503 | i915_gem_object_unpin_pages(obj); | 503 | i915_gem_object_unpin_pages(obj); | |
504 | 504 | |||
505 | if (hit_slowpath) { | 505 | if (hit_slowpath) { | |
506 | /* Fixup: Kill any reinstated backing storage pages */ | 506 | /* Fixup: Kill any reinstated backing storage pages */ | |
507 | if (obj->madv == __I915_MADV_PURGED) | 507 | if (obj->madv == __I915_MADV_PURGED) | |
508 | i915_gem_object_truncate(obj); | 508 | i915_gem_object_truncate(obj); | |
509 | } | 509 | } | |
510 | 510 | |||
511 | return ret; | 511 | return ret; | |
512 | } | 512 | } | |
513 | 513 | |||
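Editorial note: the loop above splits an arbitrary (offset, size) request into per-page chunks: the first chunk runs from offset_in_page(offset) to the end of that page, and every later chunk starts at page offset 0. The same arithmetic in isolation, assuming 4096-byte pages:

	/* Example: offset 4000, size 500 yields chunks of 96 then 404 bytes. */
	static void
	chunk_by_page(unsigned int off, unsigned int remain)
	{
		while (remain > 0) {
			unsigned int in_page = off & 4095u;	/* offset_in_page() */
			unsigned int len = remain;

			if (in_page + len > 4096u)
				len = 4096u - in_page;		/* clamp to page end */
			/* ... copy 'len' bytes for this page ... */
			remain -= len;
			off += len;
		}
	}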
514 | /** | 514 | /** | |
515 | * Reads data from the object referenced by handle. | 515 | * Reads data from the object referenced by handle. | |
516 | * | 516 | * | |
517 | * On error, the contents of *data are undefined. | 517 | * On error, the contents of *data are undefined. | |
518 | */ | 518 | */ | |
519 | int | 519 | int | |
520 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, | 520 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |
521 | struct drm_file *file) | 521 | struct drm_file *file) | |
522 | { | 522 | { | |
523 | struct drm_i915_gem_pread *args = data; | 523 | struct drm_i915_gem_pread *args = data; | |
524 | struct drm_i915_gem_object *obj; | 524 | struct drm_i915_gem_object *obj; | |
525 | int ret = 0; | 525 | int ret = 0; | |
526 | 526 | |||
527 | if (args->size == 0) | 527 | if (args->size == 0) | |
528 | return 0; | 528 | return 0; | |
529 | 529 | |||
530 | if (!access_ok(VERIFY_WRITE, | 530 | if (!access_ok(VERIFY_WRITE, | |
531 | (char __user *)(uintptr_t)args->data_ptr, | 531 | (char __user *)(uintptr_t)args->data_ptr, | |
532 | args->size)) | 532 | args->size)) | |
533 | return -EFAULT; | 533 | return -EFAULT; | |
534 | 534 | |||
535 | ret = i915_mutex_lock_interruptible(dev); | 535 | ret = i915_mutex_lock_interruptible(dev); | |
536 | if (ret) | 536 | if (ret) | |
537 | return ret; | 537 | return ret; | |
538 | 538 | |||
539 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 539 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | |
540 | if (&obj->base == NULL) { | 540 | if (&obj->base == NULL) { | |
541 | ret = -ENOENT; | 541 | ret = -ENOENT; | |
542 | goto unlock; | 542 | goto unlock; | |
543 | } | 543 | } | |
544 | 544 | |||
545 | /* Bounds check source. */ | 545 | /* Bounds check source. */ | |
546 | if (args->offset > obj->base.size || | 546 | if (args->offset > obj->base.size || | |
547 | args->size > obj->base.size - args->offset) { | 547 | args->size > obj->base.size - args->offset) { | |
548 | ret = -EINVAL; | 548 | ret = -EINVAL; | |
549 | goto out; | 549 | goto out; | |
550 | } | 550 | } | |
551 | 551 | |||
552 | /* prime objects have no backing filp to GEM pread/pwrite | 552 | /* prime objects have no backing filp to GEM pread/pwrite | |
553 | * pages from. | 553 | * pages from. | |
554 | */ | 554 | */ | |
555 | if (!obj->base.filp) { | 555 | if (!obj->base.filp) { | |
556 | ret = -EINVAL; | 556 | ret = -EINVAL; | |
557 | goto out; | 557 | goto out; | |
558 | } | 558 | } | |
559 | 559 | |||
560 | trace_i915_gem_object_pread(obj, args->offset, args->size); | 560 | trace_i915_gem_object_pread(obj, args->offset, args->size); | |
561 | 561 | |||
562 | ret = i915_gem_shmem_pread(dev, obj, args, file); | 562 | ret = i915_gem_shmem_pread(dev, obj, args, file); | |
563 | 563 | |||
564 | out: | 564 | out: | |
565 | drm_gem_object_unreference(&obj->base); | 565 | drm_gem_object_unreference(&obj->base); | |
566 | unlock: | 566 | unlock: | |
567 | mutex_unlock(&dev->struct_mutex); | 567 | mutex_unlock(&dev->struct_mutex); | |
568 | return ret; | 568 | return ret; | |
569 | } | 569 | } | |
570 | 570 | |||
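Editorial note: the bounds check above is written to be overflow-safe on unsigned arithmetic. args->size > obj->base.size - args->offset is evaluated only once args->offset is known to be in range, so the subtraction cannot wrap, whereas the naive args->offset + args->size > obj->base.size could overflow and admit a bogus range. A standalone sketch of the same predicate:

	#include <stdbool.h>
	#include <stdint.h>

	/* True iff [offset, offset + size) fits inside 'total' bytes,
	 * with no wraparound in the unsigned math. */
	static bool
	range_ok(uint64_t offset, uint64_t size, uint64_t total)
	{
		return offset <= total && size <= total - offset;
	}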
571 | /* This is the fast write path which cannot handle | 571 | /* This is the fast write path which cannot handle | |
572 | * page faults in the source data | 572 | * page faults in the source data | |
573 | */ | 573 | */ | |
574 | 574 | |||
575 | static inline int | 575 | static inline int | |
576 | fast_user_write(struct io_mapping *mapping, | 576 | fast_user_write(struct io_mapping *mapping, | |
577 | loff_t page_base, int page_offset, | 577 | loff_t page_base, int page_offset, | |
578 | char __user *user_data, | 578 | char __user *user_data, | |
579 | int length) | 579 | int length) | |
580 | { | 580 | { | |
581 | void __iomem *vaddr_atomic; | 581 | void __iomem *vaddr_atomic; | |
582 | void *vaddr; | 582 | void *vaddr; | |
583 | unsigned long unwritten; | 583 | unsigned long unwritten; | |
584 | 584 | |||
585 | vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); | 585 | vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); | |
586 | /* We can use the cpu mem copy function because this is X86. */ | 586 | /* We can use the cpu mem copy function because this is X86. */ | |
587 | vaddr = (void __force*)vaddr_atomic + page_offset; | 587 | vaddr = (void __force*)vaddr_atomic + page_offset; | |
588 | unwritten = __copy_from_user_inatomic_nocache(vaddr, | 588 | unwritten = __copy_from_user_inatomic_nocache(vaddr, | |
589 | user_data, length); | 589 | user_data, length); | |
590 | io_mapping_unmap_atomic(vaddr_atomic); | 590 | io_mapping_unmap_atomic(vaddr_atomic); | |
591 | return unwritten; | 591 | return unwritten; | |
592 | } | 592 | } | |
593 | 593 | |||
594 | /** | 594 | /** | |
595 | * This is the fast pwrite path, where we copy the data directly from the | 595 | * This is the fast pwrite path, where we copy the data directly from the | |
596 | * user into the GTT, uncached. | 596 | * user into the GTT, uncached. | |
597 | */ | 597 | */ | |
598 | static int | 598 | static int | |
599 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, | 599 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, | |
600 | struct drm_i915_gem_object *obj, | 600 | struct drm_i915_gem_object *obj, | |
601 | struct drm_i915_gem_pwrite *args, | 601 | struct drm_i915_gem_pwrite *args, | |
602 | struct drm_file *file) | 602 | struct drm_file *file) | |
603 | { | 603 | { | |
604 | drm_i915_private_t *dev_priv = dev->dev_private; | 604 | drm_i915_private_t *dev_priv = dev->dev_private; | |
605 | ssize_t remain; | 605 | ssize_t remain; | |
606 | loff_t offset, page_base; | 606 | loff_t offset, page_base; | |
607 | char __user *user_data; | 607 | char __user *user_data; | |
608 | int page_offset, page_length, ret; | 608 | int page_offset, page_length, ret; | |
609 | 609 | |||
610 | ret = i915_gem_object_pin(obj, 0, true, true); | 610 | ret = i915_gem_object_pin(obj, 0, true, true); | |
611 | if (ret) | 611 | if (ret) | |
612 | goto out; | 612 | goto out; | |
613 | 613 | |||
614 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | 614 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | |
615 | if (ret) | 615 | if (ret) | |
616 | goto out_unpin; | 616 | goto out_unpin; | |
617 | 617 | |||
618 | ret = i915_gem_object_put_fence(obj); | 618 | ret = i915_gem_object_put_fence(obj); | |
619 | if (ret) | 619 | if (ret) | |
620 | goto out_unpin; | 620 | goto out_unpin; | |
621 | 621 | |||
622 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 622 | user_data = (char __user *) (uintptr_t) args->data_ptr; | |
623 | remain = args->size; | 623 | remain = args->size; | |
624 | 624 | |||
625 | offset = obj->gtt_offset + args->offset; | 625 | offset = obj->gtt_offset + args->offset; | |
626 | 626 | |||
627 | while (remain > 0) { | 627 | while (remain > 0) { | |
628 | /* Operation in this page | 628 | /* Operation in this page | |
629 | * | 629 | * | |
630 | * page_base = page offset within aperture | 630 | * page_base = page offset within aperture | |
631 | * page_offset = offset within page | 631 | * page_offset = offset within page | |
632 | * page_length = bytes to copy for this page | 632 | * page_length = bytes to copy for this page | |
633 | */ | 633 | */ | |
634 | page_base = offset & PAGE_MASK; | 634 | page_base = offset & PAGE_MASK; | |
635 | page_offset = offset_in_page(offset); | 635 | page_offset = offset_in_page(offset); | |
636 | page_length = remain; | 636 | page_length = remain; | |
637 | if ((page_offset + remain) > PAGE_SIZE) | 637 | if ((page_offset + remain) > PAGE_SIZE) | |
638 | page_length = PAGE_SIZE - page_offset; | 638 | page_length = PAGE_SIZE - page_offset; | |
639 | 639 | |||
640 | /* If we get a fault while copying data, then (presumably) our | 640 | /* If we get a fault while copying data, then (presumably) our | |
641 | * source page isn't available. Return the error and we'll | 641 | * source page isn't available. Return the error and we'll | |
642 | * retry in the slow path. | 642 | * retry in the slow path. | |
643 | */ | 643 | */ | |
644 | if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, | 644 | if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, | |
645 | page_offset, user_data, page_length)) { | 645 | page_offset, user_data, page_length)) { | |
646 | ret = -EFAULT; | 646 | ret = -EFAULT; | |
647 | goto out_unpin; | 647 | goto out_unpin; | |
648 | } | 648 | } | |
649 | 649 | |||
650 | remain -= page_length; | 650 | remain -= page_length; | |
651 | user_data += page_length; | 651 | user_data += page_length; | |
652 | offset += page_length; | 652 | offset += page_length; | |
653 | } | 653 | } | |
654 | 654 | |||
655 | out_unpin: | 655 | out_unpin: | |
656 | i915_gem_object_unpin(obj); | 656 | i915_gem_object_unpin(obj); | |
657 | out: | 657 | out: | |
658 | return ret; | 658 | return ret; | |
659 | } | 659 | } | |
660 | 660 | |||
661 | /* Per-page copy function for the shmem pwrite fastpath. | 661 | /* Per-page copy function for the shmem pwrite fastpath. | |
662 | * Flushes invalid cachelines before writing to the target if | 662 | * Flushes invalid cachelines before writing to the target if | |
663 | * needs_clflush_before is set and flushes out any written cachelines after | 663 | * needs_clflush_before is set and flushes out any written cachelines after | |
664 | * writing if needs_clflush_after is set. */ | 664 | * writing if needs_clflush_after is set. */ | |
665 | static int | 665 | static int | |
666 | shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, | 666 | shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, | |
667 | char __user *user_data, | 667 | char __user *user_data, | |
668 | bool page_do_bit17_swizzling, | 668 | bool page_do_bit17_swizzling, | |
669 | bool needs_clflush_before, | 669 | bool needs_clflush_before, | |
670 | bool needs_clflush_after) | 670 | bool needs_clflush_after) | |
671 | { | 671 | { | |
672 | char *vaddr; | 672 | char *vaddr; | |
673 | int ret; | 673 | int ret; | |
674 | 674 | |||
675 | if (unlikely(page_do_bit17_swizzling)) | 675 | if (unlikely(page_do_bit17_swizzling)) | |
676 | return -EINVAL; | 676 | return -EINVAL; | |
677 | 677 | |||
678 | vaddr = kmap_atomic(page); | 678 | vaddr = kmap_atomic(page); | |
679 | if (needs_clflush_before) | 679 | if (needs_clflush_before) | |
680 | drm_clflush_virt_range(vaddr + shmem_page_offset, | 680 | drm_clflush_virt_range(vaddr + shmem_page_offset, | |
681 | page_length); | 681 | page_length); | |
682 | ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, | 682 | ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, | |
683 | user_data, | 683 | user_data, | |
684 | page_length); | 684 | page_length); | |
685 | if (needs_clflush_after) | 685 | if (needs_clflush_after) | |
686 | drm_clflush_virt_range(vaddr + shmem_page_offset, | 686 | drm_clflush_virt_range(vaddr + shmem_page_offset, | |
687 | page_length); | 687 | page_length); | |
688 | kunmap_atomic(vaddr); | 688 | kunmap_atomic(vaddr); | |
689 | 689 | |||
690 | return ret ? -EFAULT : 0; | 690 | return ret ? -EFAULT : 0; | |
691 | } | 691 | } | |
692 | 692 | |||
693 | /* Only difference to the fast-path function is that this can handle bit17 | 693 | /* Only difference to the fast-path function is that this can handle bit17 | |
694 | * and uses non-atomic copy and kmap functions. */ | 694 | * and uses non-atomic copy and kmap functions. */ | |
695 | static int | 695 | static int | |
696 | shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, | 696 | shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, | |
697 | char __user *user_data, | 697 | char __user *user_data, | |
698 | bool page_do_bit17_swizzling, | 698 | bool page_do_bit17_swizzling, | |
699 | bool needs_clflush_before, | 699 | bool needs_clflush_before, | |
700 | bool needs_clflush_after) | 700 | bool needs_clflush_after) | |
701 | { | 701 | { | |
702 | char *vaddr; | 702 | char *vaddr; | |
703 | int ret; | 703 | int ret; | |
704 | 704 | |||
705 | vaddr = kmap(page); | 705 | vaddr = kmap(page); | |
706 | if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) | 706 | if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) | |
707 | shmem_clflush_swizzled_range(vaddr + shmem_page_offset, | 707 | shmem_clflush_swizzled_range(vaddr + shmem_page_offset, | |
708 | page_length, | 708 | page_length, | |
709 | page_do_bit17_swizzling); | 709 | page_do_bit17_swizzling); | |
710 | if (page_do_bit17_swizzling) | 710 | if (page_do_bit17_swizzling) | |
711 | ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, | 711 | ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, | |
712 | user_data, | 712 | user_data, | |
713 | page_length); | 713 | page_length); | |
714 | else | 714 | else | |
715 | ret = __copy_from_user(vaddr + shmem_page_offset, | 715 | ret = __copy_from_user(vaddr + shmem_page_offset, | |
716 | user_data, | 716 | user_data, | |
717 | page_length); | 717 | page_length); | |
718 | if (needs_clflush_after) | 718 | if (needs_clflush_after) | |
719 | shmem_clflush_swizzled_range(vaddr + shmem_page_offset, | 719 | shmem_clflush_swizzled_range(vaddr + shmem_page_offset, | |
720 | page_length, | 720 | page_length, | |
721 | page_do_bit17_swizzling); | 721 | page_do_bit17_swizzling); | |
722 | kunmap(page); | 722 | kunmap(page); | |
723 | 723 | |||
724 | return ret ? -EFAULT : 0; | 724 | return ret ? -EFAULT : 0; | |
725 | } | 725 | } | |
726 | 726 | |||
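Editorial note: the pwrite path below flushes before a copy only for chunks that touch a cacheline partially. ORing the start offset with the length and masking by the cacheline size is nonzero exactly when the chunk starts or ends mid-line, i.e. when stale cached data could be merged into a line the copy does not fully overwrite. The predicate in isolation (clflush_size is 64 on most x86 parts):

	/* Hypothetical standalone form of the partial_cacheline_write test. */
	static int
	partial_lines(unsigned int off, unsigned int len, unsigned int clflush_size)
	{
		/* Nonzero iff 'off' is unaligned or 'len' is not a whole number
		 * of cachelines, so some line is only partly overwritten. */
		return (off | len) & (clflush_size - 1);
	}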
727 | static int | 727 | static int | |
728 | i915_gem_shmem_pwrite(struct drm_device *dev, | 728 | i915_gem_shmem_pwrite(struct drm_device *dev, | |
729 | struct drm_i915_gem_object *obj, | 729 | struct drm_i915_gem_object *obj, | |
730 | struct drm_i915_gem_pwrite *args, | 730 | struct drm_i915_gem_pwrite *args, | |
731 | struct drm_file *file) | 731 | struct drm_file *file) | |
732 | { | 732 | { | |
733 | ssize_t remain; | 733 | ssize_t remain; | |
734 | loff_t offset; | 734 | loff_t offset; | |
735 | char __user *user_data; | 735 | char __user *user_data; | |
736 | int shmem_page_offset, page_length, ret = 0; | 736 | int shmem_page_offset, page_length, ret = 0; | |
737 | int obj_do_bit17_swizzling, page_do_bit17_swizzling; | 737 | int obj_do_bit17_swizzling, page_do_bit17_swizzling; | |
738 | int hit_slowpath = 0; | 738 | int hit_slowpath = 0; | |
739 | int needs_clflush_after = 0; | 739 | int needs_clflush_after = 0; | |
740 | int needs_clflush_before = 0; | 740 | int needs_clflush_before = 0; | |
741 | int i; | 741 | int i; | |
742 | struct scatterlist *sg; | 742 | struct scatterlist *sg; | |
743 | 743 | |||
744 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 744 | user_data = (char __user *) (uintptr_t) args->data_ptr; | |
745 | remain = args->size; | 745 | remain = args->size; | |
746 | 746 | |||
747 | obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 747 | obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | |
748 | 748 | |||
749 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | 749 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | |
750 | /* If we're not in the cpu write domain, set ourselves into the gtt | 750 | /* If we're not in the cpu write domain, set ourselves into the gtt | |
751 | * write domain and manually flush cachelines (if required). This | 751 | * write domain and manually flush cachelines (if required). This | |
752 | * optimizes for the case when the gpu will use the data | 752 | * optimizes for the case when the gpu will use the data | |
753 | * right away and we therefore have to clflush anyway. */ | 753 | * right away and we therefore have to clflush anyway. */ | |
754 | if (obj->cache_level == I915_CACHE_NONE) | 754 | if (obj->cache_level == I915_CACHE_NONE) | |
755 | needs_clflush_after = 1; | 755 | needs_clflush_after = 1; | |
756 | if (obj->gtt_space) { | 756 | if (obj->gtt_space) { | |
757 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | 757 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | |
758 | if (ret) | 758 | if (ret) | |
759 | return ret; | 759 | return ret; | |
760 | } | 760 | } | |
761 | } | 761 | } | |
762 | /* Same trick applies to invalidating partially written cachelines before | 762 | /* Same trick applies to invalidating partially written cachelines before | |
763 | * writing. */ | 763 | * writing. */ | |
764 | if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU) | 764 | if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU) | |
765 | && obj->cache_level == I915_CACHE_NONE) | 765 | && obj->cache_level == I915_CACHE_NONE) | |
766 | needs_clflush_before = 1; | 766 | needs_clflush_before = 1; | |
767 | 767 | |||
768 | ret = i915_gem_object_get_pages(obj); | 768 | ret = i915_gem_object_get_pages(obj); | |
769 | if (ret) | 769 | if (ret) | |
770 | return ret; | 770 | return ret; | |
771 | 771 | |||
772 | i915_gem_object_pin_pages(obj); | 772 | i915_gem_object_pin_pages(obj); | |
773 | 773 | |||
774 | offset = args->offset; | 774 | offset = args->offset; | |
775 | obj->dirty = 1; | 775 | obj->dirty = 1; | |
776 | 776 | |||
777 | for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { | 777 | for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { | |
778 | struct page *page; | 778 | struct page *page; | |
779 | int partial_cacheline_write; | 779 | int partial_cacheline_write; | |
780 | 780 | |||
781 | if (i < offset >> PAGE_SHIFT) | 781 | if (i < offset >> PAGE_SHIFT) | |
782 | continue; | 782 | continue; | |
783 | 783 | |||
784 | if (remain <= 0) | 784 | if (remain <= 0) | |
785 | break; | 785 | break; | |
786 | 786 | |||
787 | /* Operation in this page | 787 | /* Operation in this page | |
788 | * | 788 | * | |
789 | * shmem_page_offset = offset within page in shmem file | 789 | * shmem_page_offset = offset within page in shmem file | |
790 | * page_length = bytes to copy for this page | 790 | * page_length = bytes to copy for this page | |
791 | */ | 791 | */ | |
792 | shmem_page_offset = offset_in_page(offset); | 792 | shmem_page_offset = offset_in_page(offset); | |
793 | 793 | |||
794 | page_length = remain; | 794 | page_length = remain; | |
795 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | 795 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | |
796 | page_length = PAGE_SIZE - shmem_page_offset; | 796 | page_length = PAGE_SIZE - shmem_page_offset; | |
797 | 797 | |||
798 | /* If we don't overwrite a cacheline completely we need to be | 798 | /* If we don't overwrite a cacheline completely we need to be | |
799 | * careful to have up-to-date data by first clflushing. Don't | 799 | * careful to have up-to-date data by first clflushing. Don't | |
800 | * overcomplicate things and flush the entire page. */ | 800 | * overcomplicate things and flush the entire page. */ | |
801 | partial_cacheline_write = needs_clflush_before && | 801 | partial_cacheline_write = needs_clflush_before && | |
802 | ((shmem_page_offset | page_length) | 802 | ((shmem_page_offset | page_length) | |
803 | & (boot_cpu_data.x86_clflush_size - 1)); | 803 | & (boot_cpu_data.x86_clflush_size - 1)); | |
804 | 804 | |||
805 | page = sg_page(sg); | 805 | page = sg_page(sg); | |
806 | page_do_bit17_swizzling = obj_do_bit17_swizzling && | 806 | page_do_bit17_swizzling = obj_do_bit17_swizzling && | |
807 | (page_to_phys(page) & (1 << 17)) != 0; | 807 | (page_to_phys(page) & (1 << 17)) != 0; | |
808 | 808 | |||
809 | ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, | 809 | ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, | |
810 | user_data, page_do_bit17_swizzling, | 810 | user_data, page_do_bit17_swizzling, | |
811 | partial_cacheline_write, | 811 | partial_cacheline_write, | |
812 | needs_clflush_after); | 812 | needs_clflush_after); | |
813 | if (ret == 0) | 813 | if (ret == 0) | |
814 | goto next_page; | 814 | goto next_page; | |
815 | 815 | |||
816 | hit_slowpath = 1; | 816 | hit_slowpath = 1; | |
817 | mutex_unlock(&dev->struct_mutex); | 817 | mutex_unlock(&dev->struct_mutex); | |
818 | ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, | 818 | ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, | |
819 | user_data, page_do_bit17_swizzling, | 819 | user_data, page_do_bit17_swizzling, | |
820 | partial_cacheline_write, | 820 | partial_cacheline_write, | |
821 | needs_clflush_after); | 821 | needs_clflush_after); | |
822 | 822 | |||
823 | mutex_lock(&dev->struct_mutex); | 823 | mutex_lock(&dev->struct_mutex); | |
824 | 824 | |||
825 | next_page: | 825 | next_page: | |
826 | set_page_dirty(page); | 826 | set_page_dirty(page); | |
827 | mark_page_accessed(page); | 827 | mark_page_accessed(page); | |
828 | 828 | |||
829 | if (ret) | 829 | if (ret) | |
830 | goto out; | 830 | goto out; | |
831 | 831 | |||
832 | remain -= page_length; | 832 | remain -= page_length; | |
833 | user_data += page_length; | 833 | user_data += page_length; | |
834 | offset += page_length; | 834 | offset += page_length; | |
835 | } | 835 | } | |
836 | 836 | |||
837 | out: | 837 | out: | |
838 | i915_gem_object_unpin_pages(obj); | 838 | i915_gem_object_unpin_pages(obj); | |
839 | 839 | |||
840 | if (hit_slowpath) { | 840 | if (hit_slowpath) { | |
841 | /* Fixup: Kill any reinstated backing storage pages */ | 841 | /* Fixup: Kill any reinstated backing storage pages */ | |
842 | if (obj->madv == __I915_MADV_PURGED) | 842 | if (obj->madv == __I915_MADV_PURGED) | |
843 | i915_gem_object_truncate(obj); | 843 | i915_gem_object_truncate(obj); | |
844 | /* and flush dirty cachelines in case the object isn't in the cpu write | 844 | /* and flush dirty cachelines in case the object isn't in the cpu write | |
845 | * domain anymore. */ | 845 | * domain anymore. */ | |
846 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | 846 | if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | |
847 | i915_gem_clflush_object(obj); | 847 | i915_gem_clflush_object(obj); | |
848 | i915_gem_chipset_flush(dev); | 848 | i915_gem_chipset_flush(dev); | |
849 | } | 849 | } | |
850 | } | 850 | } | |
851 | 851 | |||
852 | if (needs_clflush_after) | 852 | if (needs_clflush_after) | |
853 | i915_gem_chipset_flush(dev); | 853 | i915_gem_chipset_flush(dev); | |
854 | 854 | |||
855 | return ret; | 855 | return ret; | |
856 | } | 856 | } | |
857 | 857 | |||
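The partial-cacheline test in the loop above collapses to a single alignment check: a write leaves stale bytes in a cacheline exactly when its start offset or its length is not cacheline-aligned. A minimal standalone sketch of that test; the 64-byte line in the usage comment is just an example, the driver reads the real size from boot_cpu_data.x86_clflush_size:

    #include <stdbool.h>

    /*
     * True if [offset, offset + length) does not cover whole cachelines,
     * so the first/last line must be clflushed before writing.
     * clflush_size must be a power of two.
     */
    static bool
    is_partial_cacheline_write(unsigned offset, unsigned length,
        unsigned clflush_size)
    {
        return ((offset | length) & (clflush_size - 1)) != 0;
    }

    /* e.g. is_partial_cacheline_write(0, 4096, 64) == false,
     *      is_partial_cacheline_write(32, 100, 64) == true */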
858 | /** | 858 | /** | |
859 | * Writes data to the object referenced by handle. | 859 | * Writes data to the object referenced by handle. | |
860 | * | 860 | * | |
861 | * On error, the contents of the buffer that were to be modified are undefined. | 861 | * On error, the contents of the buffer that were to be modified are undefined. | |
862 | */ | 862 | */ | |
863 | int | 863 | int | |
864 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | 864 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |
865 | struct drm_file *file) | 865 | struct drm_file *file) | |
866 | { | 866 | { | |
867 | struct drm_i915_gem_pwrite *args = data; | 867 | struct drm_i915_gem_pwrite *args = data; | |
868 | struct drm_i915_gem_object *obj; | 868 | struct drm_i915_gem_object *obj; | |
869 | int ret; | 869 | int ret; | |
870 | 870 | |||
871 | if (args->size == 0) | 871 | if (args->size == 0) | |
872 | return 0; | 872 | return 0; | |
873 | 873 | |||
874 | if (!access_ok(VERIFY_READ, | 874 | if (!access_ok(VERIFY_READ, | |
875 | (char __user *)(uintptr_t)args->data_ptr, | 875 | (char __user *)(uintptr_t)args->data_ptr, | |
876 | args->size)) | 876 | args->size)) | |
877 | return -EFAULT; | 877 | return -EFAULT; | |
878 | 878 | |||
879 | ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr, | 879 | ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr, | |
880 | args->size); | 880 | args->size); | |
881 | if (ret) | 881 | if (ret) | |
882 | return -EFAULT; | 882 | return -EFAULT; | |
883 | 883 | |||
884 | ret = i915_mutex_lock_interruptible(dev); | 884 | ret = i915_mutex_lock_interruptible(dev); | |
885 | if (ret) | 885 | if (ret) | |
886 | return ret; | 886 | return ret; | |
887 | 887 | |||
888 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 888 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | |
889 | if (&obj->base == NULL) { | 889 | if (&obj->base == NULL) { | |
890 | ret = -ENOENT; | 890 | ret = -ENOENT; | |
891 | goto unlock; | 891 | goto unlock; | |
892 | } | 892 | } | |
893 | 893 | |||
894 | /* Bounds check destination. */ | 894 | /* Bounds check destination. */ | |
895 | if (args->offset > obj->base.size || | 895 | if (args->offset > obj->base.size || | |
896 | args->size > obj->base.size - args->offset) { | 896 | args->size > obj->base.size - args->offset) { | |
897 | ret = -EINVAL; | 897 | ret = -EINVAL; | |
898 | goto out; | 898 | goto out; | |
899 | } | 899 | } | |
900 | 900 | |||
901 | /* prime objects have no backing filp to GEM pread/pwrite | 901 | /* prime objects have no backing filp to GEM pread/pwrite | |
902 | * pages from. | 902 | * pages from. | |
903 | */ | 903 | */ | |
904 | if (!obj->base.filp) { | 904 | if (!obj->base.filp) { | |
905 | ret = -EINVAL; | 905 | ret = -EINVAL; | |
906 | goto out; | 906 | goto out; | |
907 | } | 907 | } | |
908 | 908 | |||
909 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); | 909 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); | |
910 | 910 | |||
911 | ret = -EFAULT; | 911 | ret = -EFAULT; | |
912 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 912 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | |
913 | * it would end up going through the fenced access, and we'll get | 913 | * it would end up going through the fenced access, and we'll get | |
914 | * different detiling behavior between reading and writing. | 914 | * different detiling behavior between reading and writing. | |
915 | * pread/pwrite currently are reading and writing from the CPU | 915 | * pread/pwrite currently are reading and writing from the CPU | |
916 | * perspective, requiring manual detiling by the client. | 916 | * perspective, requiring manual detiling by the client. | |
917 | */ | 917 | */ | |
918 | if (obj->phys_obj) { | 918 | if (obj->phys_obj) { | |
919 | ret = i915_gem_phys_pwrite(dev, obj, args, file); | 919 | ret = i915_gem_phys_pwrite(dev, obj, args, file); | |
920 | goto out; | 920 | goto out; | |
921 | } | 921 | } | |
922 | 922 | |||
923 | if (obj->cache_level == I915_CACHE_NONE && | 923 | if (obj->cache_level == I915_CACHE_NONE && | |
924 | obj->tiling_mode == I915_TILING_NONE && | 924 | obj->tiling_mode == I915_TILING_NONE && | |
925 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | 925 | obj->base.write_domain != I915_GEM_DOMAIN_CPU) { | |
926 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); | 926 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); | |
927 | /* Note that the gtt paths might fail with non-page-backed user | 927 | /* Note that the gtt paths might fail with non-page-backed user | |
928 | * pointers (e.g. gtt mappings when moving data between | 928 | * pointers (e.g. gtt mappings when moving data between | |
929 | * textures). Fall back to the shmem path in that case. */ | 929 | * textures). Fall back to the shmem path in that case. */ | |
930 | } | 930 | } | |
931 | 931 | |||
932 | if (ret == -EFAULT || ret == -ENOSPC) | 932 | if (ret == -EFAULT || ret == -ENOSPC) | |
933 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); | 933 | ret = i915_gem_shmem_pwrite(dev, obj, args, file); | |
934 | 934 | |||
935 | out: | 935 | out: | |
936 | drm_gem_object_unreference(&obj->base); | 936 | drm_gem_object_unreference(&obj->base); | |
937 | unlock: | 937 | unlock: | |
938 | mutex_unlock(&dev->struct_mutex); | 938 | mutex_unlock(&dev->struct_mutex); | |
939 | return ret; | 939 | return ret; | |
940 | } | 940 | } | |
941 | 941 | |||
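Note the order of comparisons in the "Bounds check destination" test above: the offset is validated against the object size first, and the requested size is then compared against the remaining room, so the arithmetic can never wrap. A self-contained illustration of the same overflow-safe idiom (range_ok is an illustrative name, not a driver function):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Overflow-safe check that [offset, offset + size) fits in a buffer
     * of object_size bytes.  The naive form (offset + size > object_size)
     * can wrap around and wrongly accept a huge range.
     */
    static bool
    range_ok(uint64_t offset, uint64_t size, uint64_t object_size)
    {
        return offset <= object_size && size <= object_size - offset;
    }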
942 | int | 942 | int | |
943 | i915_gem_check_wedge(struct drm_i915_private *dev_priv, | 943 | i915_gem_check_wedge(struct drm_i915_private *dev_priv, | |
944 | bool interruptible) | 944 | bool interruptible) | |
945 | { | 945 | { | |
946 | if (atomic_read(&dev_priv->mm.wedged)) { | 946 | if (atomic_read(&dev_priv->mm.wedged)) { | |
947 | struct completion *x = &dev_priv->error_completion; | 947 | struct completion *x = &dev_priv->error_completion; | |
948 | bool recovery_complete; | 948 | bool recovery_complete; | |
949 | unsigned long flags; | 949 | unsigned long flags; | |
950 | 950 | |||
951 | /* Give the error handler a chance to run. */ | 951 | /* Give the error handler a chance to run. */ | |
952 | spin_lock_irqsave(&x->wait.lock, flags); | 952 | spin_lock_irqsave(&x->wait.lock, flags); | |
953 | recovery_complete = x->done > 0; | 953 | recovery_complete = x->done > 0; | |
954 | spin_unlock_irqrestore(&x->wait.lock, flags); | 954 | spin_unlock_irqrestore(&x->wait.lock, flags); | |
955 | 955 | |||
956 | /* Non-interruptible callers can't handle -EAGAIN, hence return | 956 | /* Non-interruptible callers can't handle -EAGAIN, hence return | |
957 | * -EIO unconditionally for these. */ | 957 | * -EIO unconditionally for these. */ | |
958 | if (!interruptible) | 958 | if (!interruptible) | |
959 | return -EIO; | 959 | return -EIO; | |
960 | 960 | |||
961 | /* Recovery complete, but still wedged means reset failure. */ | 961 | /* Recovery complete, but still wedged means reset failure. */ | |
962 | if (recovery_complete) | 962 | if (recovery_complete) | |
963 | return -EIO; | 963 | return -EIO; | |
964 | 964 | |||
965 | return -EAGAIN; | 965 | return -EAGAIN; | |
966 | } | 966 | } | |
967 | 967 | |||
968 | return 0; | 968 | return 0; | |
969 | } | 969 | } | |
970 | 970 | |||
971 | /* | 971 | /* | |
972 | * Compare seqno against outstanding lazy request. Emit a request if they are | 972 | * Compare seqno against outstanding lazy request. Emit a request if they are | |
973 | * equal. | 973 | * equal. | |
974 | */ | 974 | */ | |
975 | static int | 975 | static int | |
976 | i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) | 976 | i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) | |
977 | { | 977 | { | |
978 | int ret; | 978 | int ret; | |
979 | 979 | |||
980 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | 980 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | |
981 | 981 | |||
982 | ret = 0; | 982 | ret = 0; | |
983 | if (seqno == ring->outstanding_lazy_request) | 983 | if (seqno == ring->outstanding_lazy_request) | |
984 | ret = i915_add_request(ring, NULL, NULL); | 984 | ret = i915_add_request(ring, NULL, NULL); | |
985 | 985 | |||
986 | return ret; | 986 | return ret; | |
987 | } | 987 | } | |
988 | 988 | |||
989 | /** | 989 | /** | |
990 | * __wait_seqno - wait until execution of seqno has finished | 990 | * __wait_seqno - wait until execution of seqno has finished | |
991 | * @ring: the ring expected to report seqno | 991 | * @ring: the ring expected to report seqno | |
992 | * @seqno: the sequence number we are waiting for | 992 | * @seqno: the sequence number we are waiting for | |
993 | * @interruptible: do an interruptible wait (normally yes) | 993 | * @interruptible: do an interruptible wait (normally yes) | |
994 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining | 994 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining | |
995 | * | 995 | * | |
996 | * Returns 0 if the seqno was found within the allotted time. Else returns the | 996 | * Returns 0 if the seqno was found within the allotted time. Else returns the | |
997 | * errno with remaining time filled in timeout argument. | 997 | * errno with remaining time filled in timeout argument. | |
998 | */ | 998 | */ | |
999 | static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | 999 | static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |
1000 | bool interruptible, struct timespec *timeout) | 1000 | bool interruptible, struct timespec *timeout) | |
1001 | { | 1001 | { | |
1002 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 1002 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | |
1003 | struct timespec before, now, wait_time={1,0}; | 1003 | struct timespec before, now, wait_time={1,0}; | |
1004 | unsigned long timeout_jiffies; | 1004 | unsigned long timeout_jiffies; | |
1005 | long end; | 1005 | long end; | |
1006 | bool wait_forever = true; | 1006 | bool wait_forever = true; | |
1007 | int ret; | 1007 | int ret; | |
1008 | 1008 | |||
1009 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) | 1009 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) | |
1010 | return 0; | 1010 | return 0; | |
1011 | 1011 | |||
1012 | trace_i915_gem_request_wait_begin(ring, seqno); | 1012 | trace_i915_gem_request_wait_begin(ring, seqno); | |
1013 | 1013 | |||
1014 | if (timeout != NULL) { | 1014 | if (timeout != NULL) { | |
1015 | wait_time = *timeout; | 1015 | wait_time = *timeout; | |
1016 | wait_forever = false; | 1016 | wait_forever = false; | |
1017 | } | 1017 | } | |
1018 | 1018 | |||
1019 | timeout_jiffies = timespec_to_jiffies(&wait_time); | 1019 | timeout_jiffies = timespec_to_jiffies(&wait_time); | |
1020 | 1020 | |||
1021 | if (WARN_ON(!ring->irq_get(ring))) | 1021 | if (WARN_ON(!ring->irq_get(ring))) | |
1022 | return -ENODEV; | 1022 | return -ENODEV; | |
1023 | 1023 | |||
1024 | /* Record current time in case interrupted by signal or wedged. */ | 1024 | /* Record current time in case interrupted by signal or wedged. */ | |
1025 | getrawmonotonic(&before); | 1025 | getrawmonotonic(&before); | |
1026 | 1026 | |||
1027 | #define EXIT_COND \ | 1027 | #define EXIT_COND \ | |
1028 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ | 1028 | (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ | |
1029 | atomic_read(&dev_priv->mm.wedged)) | 1029 | atomic_read(&dev_priv->mm.wedged)) | |
1030 | do { | 1030 | do { | |
1031 | #ifdef __NetBSD__ | |||
1032 | /* | |||
1033 | * XXX This wait is always interruptible; we should | |||
1034 | * heed the flag `interruptible'. | |||
1035 | */ | |||
1036 | DRM_TIMED_WAIT_UNTIL(end, &ring->irq_queue, &drm_global_mutex, | |||
1037 | timeout_jiffies, | |||
1038 | EXIT_COND); | |||
1039 | #else | |||
1031 | if (interruptible) | 1040 | if (interruptible) | |
1032 | end = wait_event_interruptible_timeout(ring->irq_queue, | 1041 | end = wait_event_interruptible_timeout(ring->irq_queue, | |
1033 | EXIT_COND, | 1042 | EXIT_COND, | |
1034 | timeout_jiffies); | 1043 | timeout_jiffies); | |
1035 | else | 1044 | else | |
1036 | end = wait_event_timeout(ring->irq_queue, EXIT_COND, | 1045 | end = wait_event_timeout(ring->irq_queue, EXIT_COND, | |
1037 | timeout_jiffies); | 1046 | timeout_jiffies); | |
1038 | 1047 | |||
1048 | #endif | |||
1039 | ret = i915_gem_check_wedge(dev_priv, interruptible); | 1049 | ret = i915_gem_check_wedge(dev_priv, interruptible); | |
1040 | if (ret) | 1050 | if (ret) | |
1041 | end = ret; | 1051 | end = ret; | |
1042 | } while (end == 0 && wait_forever); | 1052 | } while (end == 0 && wait_forever); | |
1043 | 1053 | |||
1044 | getrawmonotonic(&now); | 1054 | getrawmonotonic(&now); | |
1045 | 1055 | |||
1046 | ring->irq_put(ring); | 1056 | ring->irq_put(ring); | |
1047 | trace_i915_gem_request_wait_end(ring, seqno); | 1057 | trace_i915_gem_request_wait_end(ring, seqno); | |
1048 | #undef EXIT_COND | 1058 | #undef EXIT_COND | |
1049 | 1059 | |||
1050 | if (timeout) { | 1060 | if (timeout) { | |
1051 | struct timespec sleep_time = timespec_sub(now, before); | 1061 | struct timespec sleep_time = timespec_sub(now, before); | |
1052 | *timeout = timespec_sub(*timeout, sleep_time); | 1062 | *timeout = timespec_sub(*timeout, sleep_time); | |
1053 | } | 1063 | } | |
1054 | 1064 | |||
1055 | switch (end) { | 1065 | switch (end) { | |
1056 | case -EIO: | 1066 | case -EIO: | |
1057 | case -EAGAIN: /* Wedged */ | 1067 | case -EAGAIN: /* Wedged */ | |
1058 | case -ERESTARTSYS: /* Signal */ | 1068 | case -ERESTARTSYS: /* Signal */ | |
1059 | return (int)end; | 1069 | return (int)end; | |
1060 | case 0: /* Timeout */ | 1070 | case 0: /* Timeout */ | |
1061 | if (timeout) | 1071 | if (timeout) | |
1062 | set_normalized_timespec(timeout, 0, 0); | 1072 | set_normalized_timespec(timeout, 0, 0); | |
1063 | return -ETIME; | 1073 | return -ETIME; | |
1064 | default: /* Completed */ | 1074 | default: /* Completed */ | |
1065 | WARN_ON(end < 0); /* We're not aware of other errors */ | 1075 | WARN_ON(end < 0); /* We're not aware of other errors */ | |
1066 | return 0; | 1076 | return 0; | |
1067 | } | 1077 | } | |
1068 | } | 1078 | } | |
1069 | 1079 | |||
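For reference, the #ifdef __NetBSD__ branch above replaces Linux's wait_event*_timeout with DRM_TIMED_WAIT_UNTIL, a condition-variable wait under drm_global_mutex. The semantics being emulated: sleep until the condition holds, a tick budget expires, or a signal arrives. Below is a hand-written sketch of that shape on top of the standard NetBSD condvar(9) API; ring_cv, ring_lock, and done are illustrative stand-ins rather than actual driver fields, and the real compat macro may differ. As the XXX comment notes, this path (using cv_timedwait_sig) is always interruptible:

    #include <sys/types.h>
    #include <sys/errno.h>
    #include <sys/condvar.h>
    #include <sys/mutex.h>

    /*
     * Sketch: wait until *done becomes true or `ticks' elapse.
     * Returns >0 on success, 0 on timeout, and a negative errno if a
     * signal interrupts the sleep (mirroring wait_event_*_timeout).
     * A production version would recompute the remaining ticks after
     * each spurious wakeup instead of reusing the full budget.
     */
    static long
    timed_wait_until(kcondvar_t *ring_cv, kmutex_t *ring_lock,
        const volatile bool *done, int ticks)
    {
        long ret = 1;

        mutex_enter(ring_lock);
        while (!*done) {
            /* cv_timedwait_sig(9) drops/retakes the lock; signals wake it. */
            int error = cv_timedwait_sig(ring_cv, ring_lock, ticks);
            if (error == EWOULDBLOCK) {     /* timed out */
                ret = *done ? 1 : 0;
                break;
            }
            if (error != 0) {               /* signalled */
                ret = -error;
                break;
            }
        }
        mutex_exit(ring_lock);
        return ret;
    }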
1070 | /** | 1080 | /** | |
1071 | * Waits for a sequence number to be signaled, and cleans up the | 1081 | * Waits for a sequence number to be signaled, and cleans up the | |
1072 | * request and object lists appropriately for that event. | 1082 | * request and object lists appropriately for that event. | |
1073 | */ | 1083 | */ | |
1074 | int | 1084 | int | |
1075 | i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | 1085 | i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) | |
1076 | { | 1086 | { | |
1077 | struct drm_device *dev = ring->dev; | 1087 | struct drm_device *dev = ring->dev; | |
1078 | struct drm_i915_private *dev_priv = dev->dev_private; | 1088 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1079 | bool interruptible = dev_priv->mm.interruptible; | 1089 | bool interruptible = dev_priv->mm.interruptible; | |
1080 | int ret; | 1090 | int ret; | |
1081 | 1091 | |||
1082 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1092 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | |
1083 | BUG_ON(seqno == 0); | 1093 | BUG_ON(seqno == 0); | |
1084 | 1094 | |||
1085 | ret = i915_gem_check_wedge(dev_priv, interruptible); | 1095 | ret = i915_gem_check_wedge(dev_priv, interruptible); | |
1086 | if (ret) | 1096 | if (ret) | |
1087 | return ret; | 1097 | return ret; | |
1088 | 1098 | |||
1089 | ret = i915_gem_check_olr(ring, seqno); | 1099 | ret = i915_gem_check_olr(ring, seqno); | |
1090 | if (ret) | 1100 | if (ret) | |
1091 | return ret; | 1101 | return ret; | |
1092 | 1102 | |||
1093 | return __wait_seqno(ring, seqno, interruptible, NULL); | 1103 | return __wait_seqno(ring, seqno, interruptible, NULL); | |
1094 | } | 1104 | } | |
1095 | 1105 | |||
1096 | /** | 1106 | /** | |
1097 | * Ensures that all rendering to the object has completed and the object is | 1107 | * Ensures that all rendering to the object has completed and the object is | |
1098 | * safe to unbind from the GTT or access from the CPU. | 1108 | * safe to unbind from the GTT or access from the CPU. | |
1099 | */ | 1109 | */ | |
1100 | static __must_check int | 1110 | static __must_check int | |
1101 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | 1111 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | |
1102 | bool readonly) | 1112 | bool readonly) | |
1103 | { | 1113 | { | |
1104 | struct intel_ring_buffer *ring = obj->ring; | 1114 | struct intel_ring_buffer *ring = obj->ring; | |
1105 | u32 seqno; | 1115 | u32 seqno; | |
1106 | int ret; | 1116 | int ret; | |
1107 | 1117 | |||
1108 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; | 1118 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; | |
1109 | if (seqno == 0) | 1119 | if (seqno == 0) | |
1110 | return 0; | 1120 | return 0; | |
1111 | 1121 | |||
1112 | ret = i915_wait_seqno(ring, seqno); | 1122 | ret = i915_wait_seqno(ring, seqno); | |
1113 | if (ret) | 1123 | if (ret) | |
1114 | return ret; | 1124 | return ret; | |
1115 | 1125 | |||
1116 | i915_gem_retire_requests_ring(ring); | 1126 | i915_gem_retire_requests_ring(ring); | |
1117 | 1127 | |||
1118 | /* Manually manage the write flush as we may not yet have | 1128 | /* Manually manage the write flush as we may not yet have | |
1119 | * retired the buffer. | 1129 | * retired the buffer. | |
1120 | */ | 1130 | */ | |
1121 | if (obj->last_write_seqno && | 1131 | if (obj->last_write_seqno && | |
1122 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | 1132 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | |
1123 | obj->last_write_seqno = 0; | 1133 | obj->last_write_seqno = 0; | |
1124 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | 1134 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | |
1125 | } | 1135 | } | |
1126 | 1136 | |||
1127 | return 0; | 1137 | return 0; | |
1128 | } | 1138 | } | |
1129 | 1139 | |||
1130 | /* A nonblocking variant of the above wait. This is a highly dangerous routine | 1140 | /* A nonblocking variant of the above wait. This is a highly dangerous routine | |
1131 | * as the object state may change during this call. | 1141 | * as the object state may change during this call. | |
1132 | */ | 1142 | */ | |
1133 | static __must_check int | 1143 | static __must_check int | |
1134 | i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | 1144 | i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |
1135 | bool readonly) | 1145 | bool readonly) | |
1136 | { | 1146 | { | |
1137 | struct drm_device *dev = obj->base.dev; | 1147 | struct drm_device *dev = obj->base.dev; | |
1138 | struct drm_i915_private *dev_priv = dev->dev_private; | 1148 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1139 | struct intel_ring_buffer *ring = obj->ring; | 1149 | struct intel_ring_buffer *ring = obj->ring; | |
1140 | u32 seqno; | 1150 | u32 seqno; | |
1141 | int ret; | 1151 | int ret; | |
1142 | 1152 | |||
1143 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1153 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | |
1144 | BUG_ON(!dev_priv->mm.interruptible); | 1154 | BUG_ON(!dev_priv->mm.interruptible); | |
1145 | 1155 | |||
1146 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; | 1156 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; | |
1147 | if (seqno == 0) | 1157 | if (seqno == 0) | |
1148 | return 0; | 1158 | return 0; | |
1149 | 1159 | |||
1150 | ret = i915_gem_check_wedge(dev_priv, true); | 1160 | ret = i915_gem_check_wedge(dev_priv, true); | |
1151 | if (ret) | 1161 | if (ret) | |
1152 | return ret; | 1162 | return ret; | |
1153 | 1163 | |||
1154 | ret = i915_gem_check_olr(ring, seqno); | 1164 | ret = i915_gem_check_olr(ring, seqno); | |
1155 | if (ret) | 1165 | if (ret) | |
1156 | return ret; | 1166 | return ret; | |
1157 | 1167 | |||
1158 | mutex_unlock(&dev->struct_mutex); | 1168 | mutex_unlock(&dev->struct_mutex); | |
1159 | ret = __wait_seqno(ring, seqno, true, NULL); | 1169 | ret = __wait_seqno(ring, seqno, true, NULL); | |
1160 | mutex_lock(&dev->struct_mutex); | 1170 | mutex_lock(&dev->struct_mutex); | |
1161 | 1171 | |||
1162 | i915_gem_retire_requests_ring(ring); | 1172 | i915_gem_retire_requests_ring(ring); | |
1163 | 1173 | |||
1164 | /* Manually manage the write flush as we may not yet have | 1174 | /* Manually manage the write flush as we may not yet have | |
1165 | * retired the buffer. | 1175 | * retired the buffer. | |
1166 | */ | 1176 | */ | |
1167 | if (obj->last_write_seqno && | 1177 | if (obj->last_write_seqno && | |
1168 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | 1178 | i915_seqno_passed(seqno, obj->last_write_seqno)) { | |
1169 | obj->last_write_seqno = 0; | 1179 | obj->last_write_seqno = 0; | |
1170 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | 1180 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | |
1171 | } | 1181 | } | |
1172 | 1182 | |||
1173 | return ret; | 1183 | return ret; | |
1174 | } | 1184 | } | |
1175 | 1185 | |||
1176 | /** | 1186 | /** | |
1177 | * Called when user space prepares to use an object with the CPU, either | 1187 | * Called when user space prepares to use an object with the CPU, either | |
1178 | * through the mmap ioctl's mapping or a GTT mapping. | 1188 | * through the mmap ioctl's mapping or a GTT mapping. | |
1179 | */ | 1189 | */ | |
1180 | int | 1190 | int | |
1181 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 1191 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |
1182 | struct drm_file *file) | 1192 | struct drm_file *file) | |
1183 | { | 1193 | { | |
1184 | struct drm_i915_gem_set_domain *args = data; | 1194 | struct drm_i915_gem_set_domain *args = data; | |
1185 | struct drm_i915_gem_object *obj; | 1195 | struct drm_i915_gem_object *obj; | |
1186 | uint32_t read_domains = args->read_domains; | 1196 | uint32_t read_domains = args->read_domains; | |
1187 | uint32_t write_domain = args->write_domain; | 1197 | uint32_t write_domain = args->write_domain; | |
1188 | int ret; | 1198 | int ret; | |
1189 | 1199 | |||
1190 | /* Only handle setting domains to types used by the CPU. */ | 1200 | /* Only handle setting domains to types used by the CPU. */ | |
1191 | if (write_domain & I915_GEM_GPU_DOMAINS) | 1201 | if (write_domain & I915_GEM_GPU_DOMAINS) | |
1192 | return -EINVAL; | 1202 | return -EINVAL; | |
1193 | 1203 | |||
1194 | if (read_domains & I915_GEM_GPU_DOMAINS) | 1204 | if (read_domains & I915_GEM_GPU_DOMAINS) | |
1195 | return -EINVAL; | 1205 | return -EINVAL; | |
1196 | 1206 | |||
1197 | /* Having something in the write domain implies it's in the read | 1207 | /* Having something in the write domain implies it's in the read | |
1198 | * domain, and only that read domain. Enforce that in the request. | 1208 | * domain, and only that read domain. Enforce that in the request. | |
1199 | */ | 1209 | */ | |
1200 | if (write_domain != 0 && read_domains != write_domain) | 1210 | if (write_domain != 0 && read_domains != write_domain) | |
1201 | return -EINVAL; | 1211 | return -EINVAL; | |
1202 | 1212 | |||
1203 | ret = i915_mutex_lock_interruptible(dev); | 1213 | ret = i915_mutex_lock_interruptible(dev); | |
1204 | if (ret) | 1214 | if (ret) | |
1205 | return ret; | 1215 | return ret; | |
1206 | 1216 | |||
1207 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 1217 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | |
1208 | if (&obj->base == NULL) { | 1218 | if (&obj->base == NULL) { | |
1209 | ret = -ENOENT; | 1219 | ret = -ENOENT; | |
1210 | goto unlock; | 1220 | goto unlock; | |
1211 | } | 1221 | } | |
1212 | 1222 | |||
1213 | /* Try to flush the object off the GPU without holding the lock. | 1223 | /* Try to flush the object off the GPU without holding the lock. | |
1214 | * We will repeat the flush holding the lock in the normal manner | 1224 | * We will repeat the flush holding the lock in the normal manner | |
1215 | * to catch cases where we are gazumped. | 1225 | * to catch cases where we are gazumped. | |
1216 | */ | 1226 | */ | |
1217 | ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); | 1227 | ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); | |
1218 | if (ret) | 1228 | if (ret) | |
1219 | goto unref; | 1229 | goto unref; | |
1220 | 1230 | |||
1221 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 1231 | if (read_domains & I915_GEM_DOMAIN_GTT) { | |
1222 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | 1232 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | |
1223 | 1233 | |||
1224 | /* Silently promote "you're not bound, there was nothing to do" | 1234 | /* Silently promote "you're not bound, there was nothing to do" | |
1225 | * to success, since the client was just asking us to | 1235 | * to success, since the client was just asking us to | |
1226 | * make sure everything was done. | 1236 | * make sure everything was done. | |
1227 | */ | 1237 | */ | |
1228 | if (ret == -EINVAL) | 1238 | if (ret == -EINVAL) | |
1229 | ret = 0; | 1239 | ret = 0; | |
1230 | } else { | 1240 | } else { | |
1231 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 1241 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | |
1232 | } | 1242 | } | |
1233 | 1243 | |||
1234 | unref: | 1244 | unref: | |
1235 | drm_gem_object_unreference(&obj->base); | 1245 | drm_gem_object_unreference(&obj->base); | |
1236 | unlock: | 1246 | unlock: | |
1237 | mutex_unlock(&dev->struct_mutex); | 1247 | mutex_unlock(&dev->struct_mutex); | |
1238 | return ret; | 1248 | return ret; | |
1239 | } | 1249 | } | |
1240 | 1250 | |||
1241 | /** | 1251 | /** | |
1242 | * Called when user space has done writes to this buffer | 1252 | * Called when user space has done writes to this buffer | |
1243 | */ | 1253 | */ | |
1244 | int | 1254 | int | |
1245 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 1255 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |
1246 | struct drm_file *file) | 1256 | struct drm_file *file) | |
1247 | { | 1257 | { | |
1248 | struct drm_i915_gem_sw_finish *args = data; | 1258 | struct drm_i915_gem_sw_finish *args = data; | |
1249 | struct drm_i915_gem_object *obj; | 1259 | struct drm_i915_gem_object *obj; | |
1250 | int ret = 0; | 1260 | int ret = 0; | |
1251 | 1261 | |||
1252 | ret = i915_mutex_lock_interruptible(dev); | 1262 | ret = i915_mutex_lock_interruptible(dev); | |
1253 | if (ret) | 1263 | if (ret) | |
1254 | return ret; | 1264 | return ret; | |
1255 | 1265 | |||
1256 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 1266 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | |
1257 | if (&obj->base == NULL) { | 1267 | if (&obj->base == NULL) { | |
1258 | ret = -ENOENT; | 1268 | ret = -ENOENT; | |
1259 | goto unlock; | 1269 | goto unlock; | |
1260 | } | 1270 | } | |
1261 | 1271 | |||
1262 | /* Pinned buffers may be scanout, so flush the cache */ | 1272 | /* Pinned buffers may be scanout, so flush the cache */ | |
1263 | if (obj->pin_count) | 1273 | if (obj->pin_count) | |
1264 | i915_gem_object_flush_cpu_write_domain(obj); | 1274 | i915_gem_object_flush_cpu_write_domain(obj); | |
1265 | 1275 | |||
1266 | drm_gem_object_unreference(&obj->base); | 1276 | drm_gem_object_unreference(&obj->base); | |
1267 | unlock: | 1277 | unlock: | |
1268 | mutex_unlock(&dev->struct_mutex); | 1278 | mutex_unlock(&dev->struct_mutex); | |
1269 | return ret; | 1279 | return ret; | |
1270 | } | 1280 | } | |
1271 | 1281 | |||
1272 | /** | 1282 | /** | |
1273 | * Maps the contents of an object, returning the address it is mapped | 1283 | * Maps the contents of an object, returning the address it is mapped | |
1274 | * into. | 1284 | * into. | |
1275 | * | 1285 | * | |
1276 | * While the mapping holds a reference on the contents of the object, it doesn't | 1286 | * While the mapping holds a reference on the contents of the object, it doesn't | |
1277 | * imply a ref on the object itself. | 1287 | * imply a ref on the object itself. | |
1278 | */ | 1288 | */ | |
1279 | int | 1289 | int | |
1280 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 1290 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |
1281 | struct drm_file *file) | 1291 | struct drm_file *file) | |
1282 | { | 1292 | { | |
1283 | struct drm_i915_gem_mmap *args = data; | 1293 | struct drm_i915_gem_mmap *args = data; | |
1284 | struct drm_gem_object *obj; | 1294 | struct drm_gem_object *obj; | |
1285 | unsigned long addr; | 1295 | unsigned long addr; | |
1286 | 1296 | |||
1287 | obj = drm_gem_object_lookup(dev, file, args->handle); | 1297 | obj = drm_gem_object_lookup(dev, file, args->handle); | |
1288 | if (obj == NULL) | 1298 | if (obj == NULL) | |
1289 | return -ENOENT; | 1299 | return -ENOENT; | |
1290 | 1300 | |||
1291 | /* prime objects have no backing filp to GEM mmap | 1301 | /* prime objects have no backing filp to GEM mmap | |
1292 | * pages from. | 1302 | * pages from. | |
1293 | */ | 1303 | */ | |
1294 | if (!obj->filp) { | 1304 | if (!obj->filp) { | |
1295 | drm_gem_object_unreference_unlocked(obj); | 1305 | drm_gem_object_unreference_unlocked(obj); | |
1296 | return -EINVAL; | 1306 | return -EINVAL; | |
1297 | } | 1307 | } | |
1298 | 1308 | |||
1299 | addr = vm_mmap(obj->filp, 0, args->size, | 1309 | addr = vm_mmap(obj->filp, 0, args->size, | |
1300 | PROT_READ | PROT_WRITE, MAP_SHARED, | 1310 | PROT_READ | PROT_WRITE, MAP_SHARED, | |
1301 | args->offset); | 1311 | args->offset); | |
1302 | drm_gem_object_unreference_unlocked(obj); | 1312 | drm_gem_object_unreference_unlocked(obj); | |
1303 | if (IS_ERR((void *)addr)) | 1313 | if (IS_ERR((void *)addr)) | |
1304 | return addr; | 1314 | return addr; | |
1305 | 1315 | |||
1306 | args->addr_ptr = (uint64_t) addr; | 1316 | args->addr_ptr = (uint64_t) addr; | |
1307 | 1317 | |||
1308 | return 0; | 1318 | return 0; | |
1309 | } | 1319 | } | |
1310 | 1320 | |||
1311 | /** | 1321 | /** | |
1312 | * i915_gem_fault - fault a page into the GTT | 1322 | * i915_gem_fault - fault a page into the GTT | |
1313 | * @vma: VMA in question | 1323 | * @vma: VMA in question | |
1314 | * @vmf: fault info | 1324 | * @vmf: fault info | |
1315 | * | 1325 | * | |
1316 | * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped | 1326 | * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped | |
1317 | * from userspace. The fault handler takes care of binding the object to | 1327 | * from userspace. The fault handler takes care of binding the object to | |
1318 | * the GTT (if needed), allocating and programming a fence register (again, | 1328 | * the GTT (if needed), allocating and programming a fence register (again, | |
1319 | * only if needed based on whether the old reg is still valid or the object | 1329 | * only if needed based on whether the old reg is still valid or the object | |
1320 | * is tiled) and inserting a new PTE into the faulting process. | 1330 | * is tiled) and inserting a new PTE into the faulting process. | |
1321 | * | 1331 | * | |
1322 | * Note that the faulting process may involve evicting existing objects | 1332 | * Note that the faulting process may involve evicting existing objects | |
1323 | * from the GTT and/or fence registers to make room. So performance may | 1333 | * from the GTT and/or fence registers to make room. So performance may | |
1324 | * suffer if the GTT working set is large or there are few fence registers | 1334 | * suffer if the GTT working set is large or there are few fence registers | |
1325 | * left. | 1335 | * left. | |
1326 | */ | 1336 | */ | |
1327 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1337 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |
1328 | { | 1338 | { | |
1329 | struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); | 1339 | struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); | |
1330 | struct drm_device *dev = obj->base.dev; | 1340 | struct drm_device *dev = obj->base.dev; | |
1331 | drm_i915_private_t *dev_priv = dev->dev_private; | 1341 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1332 | pgoff_t page_offset; | 1342 | pgoff_t page_offset; | |
1333 | unsigned long pfn; | 1343 | unsigned long pfn; | |
1334 | int ret = 0; | 1344 | int ret = 0; | |
1335 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | 1345 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | |
1336 | 1346 | |||
1337 | /* We don't use vmf->pgoff since that has the fake offset */ | 1347 | /* We don't use vmf->pgoff since that has the fake offset */ | |
1338 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | 1348 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | |
1339 | PAGE_SHIFT; | 1349 | PAGE_SHIFT; | |
1340 | 1350 | |||
1341 | ret = i915_mutex_lock_interruptible(dev); | 1351 | ret = i915_mutex_lock_interruptible(dev); | |
1342 | if (ret) | 1352 | if (ret) | |
1343 | goto out; | 1353 | goto out; | |
1344 | 1354 | |||
1345 | trace_i915_gem_object_fault(obj, page_offset, true, write); | 1355 | trace_i915_gem_object_fault(obj, page_offset, true, write); | |
1346 | 1356 | |||
1347 | /* Now bind it into the GTT if needed */ | 1357 | /* Now bind it into the GTT if needed */ | |
1348 | ret = i915_gem_object_pin(obj, 0, true, false); | 1358 | ret = i915_gem_object_pin(obj, 0, true, false); | |
1349 | if (ret) | 1359 | if (ret) | |
1350 | goto unlock; | 1360 | goto unlock; | |
1351 | 1361 | |||
1352 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | 1362 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | |
1353 | if (ret) | 1363 | if (ret) | |
1354 | goto unpin; | 1364 | goto unpin; | |
1355 | 1365 | |||
1356 | ret = i915_gem_object_get_fence(obj); | 1366 | ret = i915_gem_object_get_fence(obj); | |
1357 | if (ret) | 1367 | if (ret) | |
1358 | goto unpin; | 1368 | goto unpin; | |
1359 | 1369 | |||
1360 | obj->fault_mappable = true; | 1370 | obj->fault_mappable = true; | |
1361 | 1371 | |||
1362 | pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) + | 1372 | pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) + | |
1363 | page_offset; | 1373 | page_offset; | |
1364 | 1374 | |||
1365 | /* Finally, remap it using the new GTT offset */ | 1375 | /* Finally, remap it using the new GTT offset */ | |
1366 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 1376 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | |
1367 | unpin: | 1377 | unpin: | |
1368 | i915_gem_object_unpin(obj); | 1378 | i915_gem_object_unpin(obj); | |
1369 | unlock: | 1379 | unlock: | |
1370 | mutex_unlock(&dev->struct_mutex); | 1380 | mutex_unlock(&dev->struct_mutex); | |
1371 | out: | 1381 | out: | |
1372 | switch (ret) { | 1382 | switch (ret) { | |
1373 | case -EIO: | 1383 | case -EIO: | |
1374 | /* If this -EIO is due to a gpu hang, give the reset code a | 1384 | /* If this -EIO is due to a gpu hang, give the reset code a | |
1375 | * chance to clean up the mess. Otherwise return the proper | 1385 | * chance to clean up the mess. Otherwise return the proper | |
1376 | * SIGBUS. */ | 1386 | * SIGBUS. */ | |
1377 | if (!atomic_read(&dev_priv->mm.wedged)) | 1387 | if (!atomic_read(&dev_priv->mm.wedged)) | |
1378 | return VM_FAULT_SIGBUS; | 1388 | return VM_FAULT_SIGBUS; | |
1379 | case -EAGAIN: | 1389 | case -EAGAIN: | |
1380 | /* Give the error handler a chance to run and move the | 1390 | /* Give the error handler a chance to run and move the | |
1381 | * objects off the GPU active list. Next time we service the | 1391 | * objects off the GPU active list. Next time we service the | |
1382 | * fault, we should be able to transition the page into the | 1392 | * fault, we should be able to transition the page into the | |
1383 | * GTT without touching the GPU (and so avoid further | 1393 | * GTT without touching the GPU (and so avoid further | |
1384 | * EIO/EAGAIN). If the GPU is wedged, then there is no issue | 1394 | * EIO/EAGAIN). If the GPU is wedged, then there is no issue | |
1385 | * with coherency, just lost writes. | 1395 | * with coherency, just lost writes. | |
1386 | */ | 1396 | */ | |
1387 | set_need_resched(); | 1397 | set_need_resched(); | |
1388 | case 0: | 1398 | case 0: | |
1389 | case -ERESTARTSYS: | 1399 | case -ERESTARTSYS: | |
1390 | case -EINTR: | 1400 | case -EINTR: | |
1391 | case -EBUSY: | 1401 | case -EBUSY: | |
1392 | /* | 1402 | /* | |
1393 | * EBUSY is ok: this just means that another thread | 1403 | * EBUSY is ok: this just means that another thread | |
1394 | * already did the job. | 1404 | * already did the job. | |
1395 | */ | 1405 | */ | |
1396 | return VM_FAULT_NOPAGE; | 1406 | return VM_FAULT_NOPAGE; | |
1397 | case -ENOMEM: | 1407 | case -ENOMEM: | |
1398 | return VM_FAULT_OOM; | 1408 | return VM_FAULT_OOM; | |
1399 | case -ENOSPC: | 1409 | case -ENOSPC: | |
1400 | return VM_FAULT_SIGBUS; | 1410 | return VM_FAULT_SIGBUS; | |
1401 | default: | 1411 | default: | |
1402 | WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); | 1412 | WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); | |
1403 | return VM_FAULT_SIGBUS; | 1413 | return VM_FAULT_SIGBUS; | |
1404 | } | 1414 | } | |
1405 | } | 1415 | } | |
1406 | 1416 | |||
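The fallthroughs in the switch above are deliberate: a wedged -EIO falls into the -EAGAIN case, which yields the CPU via set_need_resched() and then falls into the VM_FAULT_NOPAGE group, so the faulting thread simply retries once the reset code has had a chance to run. Condensed to its skeleton (classify_fault and the enum are hypothetical illustrations, and the set_need_resched() side effect is elided):

    #include <errno.h>
    #include <stdbool.h>

    enum fault_status { FAULT_NOPAGE, FAULT_OOM, FAULT_SIGBUS };

    /* Skeleton of the errno -> fault-status mapping above. */
    static enum fault_status
    classify_fault(int ret, bool wedged)
    {
        switch (ret) {
        case -EIO:
            if (!wedged)
                return FAULT_SIGBUS;
            /* FALLTHROUGH: wedged, let the reset code run */
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
            return FAULT_NOPAGE;    /* retry the fault later */
        case -ENOMEM:
            return FAULT_OOM;
        default:
            return FAULT_SIGBUS;
        }
    }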
1407 | /** | 1417 | /** | |
1408 | * i915_gem_release_mmap - remove physical page mappings | 1418 | * i915_gem_release_mmap - remove physical page mappings | |
1409 | * @obj: obj in question | 1419 | * @obj: obj in question | |
1410 | * | 1420 | * | |
1411 | * Preserve the reservation of the mmapping with the DRM core code, but | 1421 | * Preserve the reservation of the mmapping with the DRM core code, but | |
1412 | * relinquish ownership of the pages back to the system. | 1422 | * relinquish ownership of the pages back to the system. | |
1413 | * | 1423 | * | |
1414 | * It is vital that we remove the page mapping if we have mapped a tiled | 1424 | * It is vital that we remove the page mapping if we have mapped a tiled | |
1415 | * object through the GTT and then lose the fence register due to | 1425 | * object through the GTT and then lose the fence register due to | |
1416 | * resource pressure. Similarly if the object has been moved out of the | 1426 | * resource pressure. Similarly if the object has been moved out of the | |
1417 | * aperture, then pages mapped into userspace must be revoked. Removing the | 1427 | * aperture, then pages mapped into userspace must be revoked. Removing the | |
1418 | * mapping will then trigger a page fault on the next user access, allowing | 1428 | * mapping will then trigger a page fault on the next user access, allowing | |
1419 | * fixup by i915_gem_fault(). | 1429 | * fixup by i915_gem_fault(). | |
1420 | */ | 1430 | */ | |
1421 | void | 1431 | void | |
1422 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) | 1432 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) | |
1423 | { | 1433 | { | |
1424 | if (!obj->fault_mappable) | 1434 | if (!obj->fault_mappable) | |
1425 | return; | 1435 | return; | |
1426 | 1436 | |||
1427 | if (obj->base.dev->dev_mapping) | 1437 | if (obj->base.dev->dev_mapping) | |
1428 | unmap_mapping_range(obj->base.dev->dev_mapping, | 1438 | unmap_mapping_range(obj->base.dev->dev_mapping, | |
1429 | (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, | 1439 | (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, | |
1430 | obj->base.size, 1); | 1440 | obj->base.size, 1); | |
1431 | 1441 | |||
1432 | obj->fault_mappable = false; | 1442 | obj->fault_mappable = false; | |
1433 | } | 1443 | } | |
1434 | 1444 | |||
1435 | static uint32_t | 1445 | static uint32_t | |
1436 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) | 1446 | i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) | |
1437 | { | 1447 | { | |
1438 | uint32_t gtt_size; | 1448 | uint32_t gtt_size; | |
1439 | 1449 | |||
1440 | if (INTEL_INFO(dev)->gen >= 4 || | 1450 | if (INTEL_INFO(dev)->gen >= 4 || | |
1441 | tiling_mode == I915_TILING_NONE) | 1451 | tiling_mode == I915_TILING_NONE) | |
1442 | return size; | 1452 | return size; | |
1443 | 1453 | |||
1444 | /* Previous chips need a power-of-two fence region when tiling */ | 1454 | /* Previous chips need a power-of-two fence region when tiling */ | |
1445 | if (INTEL_INFO(dev)->gen == 3) | 1455 | if (INTEL_INFO(dev)->gen == 3) | |
1446 | gtt_size = 1024*1024; | 1456 | gtt_size = 1024*1024; | |
1447 | else | 1457 | else | |
1448 | gtt_size = 512*1024; | 1458 | gtt_size = 512*1024; | |
1449 | 1459 | |||
1450 | while (gtt_size < size) | 1460 | while (gtt_size < size) | |
1451 | gtt_size <<= 1; | 1461 | gtt_size <<= 1; | |
1452 | 1462 | |||
1453 | return gtt_size; | 1463 | return gtt_size; | |
1454 | } | 1464 | } | |
1455 | 1465 | |||
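The helper above rounds the object size up to the next power of two, starting from the minimum fence region for the generation (1 MiB on gen3, 512 KiB on earlier chips). Worked example: a 1.5 MiB tiled object on gen3 needs a 2 MiB fence region. The rounding loop in isolation (fence_region_size is an illustrative name):

    #include <stdint.h>

    /*
     * Round `size' up to the next power of two, starting at `base';
     * `base' itself must be a power of two.
     */
    static uint32_t
    fence_region_size(uint32_t size, uint32_t base)
    {
        uint32_t gtt_size = base;

        while (gtt_size < size)
            gtt_size <<= 1;
        return gtt_size;
    }

    /* fence_region_size(1536*1024, 1024*1024) == 2*1024*1024 */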
1456 | /** | 1466 | /** | |
1457 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object | 1467 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object | |
1458 | * @obj: object to check | 1468 | * @obj: object to check | |
1459 | * | 1469 | * | |
1460 | * Return the required GTT alignment for an object, taking into account | 1470 | * Return the required GTT alignment for an object, taking into account | |
1461 | * potential fence register mapping. | 1471 | * potential fence register mapping. | |
1462 | */ | 1472 | */ | |
1463 | static uint32_t | 1473 | static uint32_t | |
1464 | i915_gem_get_gtt_alignment(struct drm_device *dev, | 1474 | i915_gem_get_gtt_alignment(struct drm_device *dev, | |
1465 | uint32_t size, | 1475 | uint32_t size, | |
1466 | int tiling_mode) | 1476 | int tiling_mode) | |
1467 | { | 1477 | { | |
1468 | /* | 1478 | /* | |
1469 | * Minimum alignment is 4k (GTT page size), but might be greater | 1479 | * Minimum alignment is 4k (GTT page size), but might be greater | |
1470 | * if a fence register is needed for the object. | 1480 | * if a fence register is needed for the object. | |
1471 | */ | 1481 | */ | |
1472 | if (INTEL_INFO(dev)->gen >= 4 || | 1482 | if (INTEL_INFO(dev)->gen >= 4 || | |
1473 | tiling_mode == I915_TILING_NONE) | 1483 | tiling_mode == I915_TILING_NONE) | |
1474 | return 4096; | 1484 | return 4096; | |
1475 | 1485 | |||
1476 | /* | 1486 | /* | |
1477 | * Previous chips need to be aligned to the size of the smallest | 1487 | * Previous chips need to be aligned to the size of the smallest | |
1478 | * fence register that can contain the object. | 1488 | * fence register that can contain the object. | |
1479 | */ | 1489 | */ | |
1480 | return i915_gem_get_gtt_size(dev, size, tiling_mode); | 1490 | return i915_gem_get_gtt_size(dev, size, tiling_mode); | |
1481 | } | 1491 | } | |
1482 | 1492 | |||
1483 | /** | 1493 | /** | |
1484 | * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an | 1494 | * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an | |
1485 | * unfenced object | 1495 | * unfenced object | |
1486 | * @dev: the device | 1496 | * @dev: the device | |
1487 | * @size: size of the object | 1497 | * @size: size of the object | |
1488 | * @tiling_mode: tiling mode of the object | 1498 | * @tiling_mode: tiling mode of the object | |
1489 | * | 1499 | * | |
1490 | * Return the required GTT alignment for an object, only taking into account | 1500 | * Return the required GTT alignment for an object, only taking into account | |
1491 | * unfenced tiled surface requirements. | 1501 | * unfenced tiled surface requirements. | |
1492 | */ | 1502 | */ | |
1493 | uint32_t | 1503 | uint32_t | |
1494 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, | 1504 | i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, | |
1495 | uint32_t size, | 1505 | uint32_t size, | |
1496 | int tiling_mode) | 1506 | int tiling_mode) | |
1497 | { | 1507 | { | |
1498 | /* | 1508 | /* | |
1499 | * Minimum alignment is 4k (GTT page size) for sane hw. | 1509 | * Minimum alignment is 4k (GTT page size) for sane hw. | |
1500 | */ | 1510 | */ | |
1501 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || | 1511 | if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || | |
1502 | tiling_mode == I915_TILING_NONE) | 1512 | tiling_mode == I915_TILING_NONE) | |
1503 | return 4096; | 1513 | return 4096; | |
1504 | 1514 | |||
1505 | /* Previous hardware however needs to be aligned to a power-of-two | 1515 | /* Previous hardware however needs to be aligned to a power-of-two | |
1506 | * tile height. The simplest method for determining this is to reuse | 1516 | * tile height. The simplest method for determining this is to reuse | |
1507 | * the power-of-tile object size. | 1517 | * the power-of-tile object size. | |
1508 | */ | 1518 | */ | |
1509 | return i915_gem_get_gtt_size(dev, size, tiling_mode); | 1519 | return i915_gem_get_gtt_size(dev, size, tiling_mode); | |
1510 | } | 1520 | } | |
1511 | 1521 | |||
1512 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) | 1522 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) | |
1513 | { | 1523 | { | |
1514 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 1524 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | |
1515 | int ret; | 1525 | int ret; | |
1516 | 1526 | |||
1517 | if (obj->base.map_list.map) | 1527 | if (obj->base.map_list.map) | |
1518 | return 0; | 1528 | return 0; | |
1519 | 1529 | |||
1520 | dev_priv->mm.shrinker_no_lock_stealing = true; | 1530 | dev_priv->mm.shrinker_no_lock_stealing = true; | |
1521 | 1531 | |||
1522 | ret = drm_gem_create_mmap_offset(&obj->base); | 1532 | ret = drm_gem_create_mmap_offset(&obj->base); | |
1523 | if (ret != -ENOSPC) | 1533 | if (ret != -ENOSPC) | |
1524 | goto out; | 1534 | goto out; | |
1525 | 1535 | |||
1526 | /* Badly fragmented mmap space? The only way we can recover | 1536 | /* Badly fragmented mmap space? The only way we can recover | |
1527 | * space is by destroying unwanted objects. We can't randomly release | 1537 | * space is by destroying unwanted objects. We can't randomly release | |
1528 | * mmap_offsets as userspace expects them to be persistent for the | 1538 | * mmap_offsets as userspace expects them to be persistent for the | |
1529 | * lifetime of the objects. The closest we can get is to release the | 1539 | * lifetime of the objects. The closest we can get is to release the | |
1530 | * offsets on purgeable objects by truncating them and marking them purged, | 1540 | * offsets on purgeable objects by truncating them and marking them purged, | |
1531 | * which prevents userspace from ever using that object again. | 1541 | * which prevents userspace from ever using that object again. | |
1532 | */ | 1542 | */ | |
1533 | i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); | 1543 | i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); | |
1534 | ret = drm_gem_create_mmap_offset(&obj->base); | 1544 | ret = drm_gem_create_mmap_offset(&obj->base); | |
1535 | if (ret != -ENOSPC) | 1545 | if (ret != -ENOSPC) | |
1536 | goto out; | 1546 | goto out; | |
1537 | 1547 | |||
1538 | i915_gem_shrink_all(dev_priv); | 1548 | i915_gem_shrink_all(dev_priv); | |
1539 | ret = drm_gem_create_mmap_offset(&obj->base); | 1549 | ret = drm_gem_create_mmap_offset(&obj->base); | |
1540 | out: | 1550 | out: | |
1541 | dev_priv->mm.shrinker_no_lock_stealing = false; | 1551 | dev_priv->mm.shrinker_no_lock_stealing = false; | |
1542 | 1552 | |||
1543 | return ret; | 1553 | return ret; | |
1544 | } | 1554 | } | |
1545 | 1555 | |||
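The function above is an escalating-retry pattern: attempt the allocation, and on -ENOSPC apply progressively more aggressive reclaim (purge roughly as many pages as the object needs, then shrink everything) before retrying. The control flow in the abstract; try_alloc, purge_some, and purge_all are hypothetical stand-ins for drm_gem_create_mmap_offset, i915_gem_purge, and i915_gem_shrink_all:

    #include <errno.h>

    /* Stubbed stand-ins for the real allocator and shrinkers. */
    static int  try_alloc(void)  { return -ENOSPC; }
    static void purge_some(void) { }
    static void purge_all(void)  { }

    static int
    alloc_with_pressure(void)
    {
        int ret;

        ret = try_alloc();
        if (ret != -ENOSPC)
            return ret;     /* success, or an error reclaim can't fix */
        purge_some();       /* cheap: drop purgeable objects first */
        ret = try_alloc();
        if (ret != -ENOSPC)
            return ret;
        purge_all();        /* expensive: shrink all object lists */
        return try_alloc(); /* final attempt; caller may see -ENOSPC */
    }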
1546 | static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) | 1556 | static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) | |
1547 | { | 1557 | { | |
1548 | if (!obj->base.map_list.map) | 1558 | if (!obj->base.map_list.map) | |
1549 | return; | 1559 | return; | |
1550 | 1560 | |||
1551 | drm_gem_free_mmap_offset(&obj->base); | 1561 | drm_gem_free_mmap_offset(&obj->base); | |
1552 | } | 1562 | } | |
1553 | 1563 | |||
1554 | int | 1564 | int | |
1555 | i915_gem_mmap_gtt(struct drm_file *file, | 1565 | i915_gem_mmap_gtt(struct drm_file *file, | |
1556 | struct drm_device *dev, | 1566 | struct drm_device *dev, | |
1557 | uint32_t handle, | 1567 | uint32_t handle, | |
1558 | uint64_t *offset) | 1568 | uint64_t *offset) | |
1559 | { | 1569 | { | |
1560 | struct drm_i915_private *dev_priv = dev->dev_private; | 1570 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1561 | struct drm_i915_gem_object *obj; | 1571 | struct drm_i915_gem_object *obj; | |
1562 | int ret; | 1572 | int ret; | |
1563 | 1573 | |||
1564 | ret = i915_mutex_lock_interruptible(dev); | 1574 | ret = i915_mutex_lock_interruptible(dev); | |
1565 | if (ret) | 1575 | if (ret) | |
1566 | return ret; | 1576 | return ret; | |
1567 | 1577 | |||
1568 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); | 1578 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); | |
1569 | if (&obj->base == NULL) { | 1579 | if (&obj->base == NULL) { | |
1570 | ret = -ENOENT; | 1580 | ret = -ENOENT; | |
1571 | goto unlock; | 1581 | goto unlock; | |
1572 | } | 1582 | } | |
1573 | 1583 | |||
1574 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { | 1584 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { | |
1575 | ret = -E2BIG; | 1585 | ret = -E2BIG; | |
1576 | goto out; | 1586 | goto out; | |
1577 | } | 1587 | } | |
1578 | 1588 | |||
1579 | if (obj->madv != I915_MADV_WILLNEED) { | 1589 | if (obj->madv != I915_MADV_WILLNEED) { | |
1580 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 1590 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | |
1581 | ret = -EINVAL; | 1591 | ret = -EINVAL; | |
1582 | goto out; | 1592 | goto out; | |
1583 | } | 1593 | } | |
1584 | 1594 | |||
1585 | ret = i915_gem_object_create_mmap_offset(obj); | 1595 | ret = i915_gem_object_create_mmap_offset(obj); | |
1586 | if (ret) | 1596 | if (ret) | |
1587 | goto out; | 1597 | goto out; | |
1588 | 1598 | |||
1589 | *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; | 1599 | *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; | |
1590 | 1600 | |||
1591 | out: | 1601 | out: | |
1592 | drm_gem_object_unreference(&obj->base); | 1602 | drm_gem_object_unreference(&obj->base); | |
1593 | unlock: | 1603 | unlock: | |
1594 | mutex_unlock(&dev->struct_mutex); | 1604 | mutex_unlock(&dev->struct_mutex); | |
1595 | return ret; | 1605 | return ret; | |
1596 | } | 1606 | } | |
1597 | 1607 | |||
1598 | /** | 1608 | /** | |
1599 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 1609 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | |
1600 | * @dev: DRM device | 1610 | * @dev: DRM device | |
1601 | * @data: GTT mapping ioctl data | 1611 | * @data: GTT mapping ioctl data | |
1602 | * @file: GEM object info | 1612 | * @file: GEM object info | |
1603 | * | 1613 | * | |
1604 | * Simply returns the fake offset to userspace so it can mmap it. | 1614 | * Simply returns the fake offset to userspace so it can mmap it. | |
1605 | * The mmap call will end up in drm_gem_mmap(), which will set things | 1615 | * The mmap call will end up in drm_gem_mmap(), which will set things | |
1606 | * up so we can get faults in the handler above. | 1616 | * up so we can get faults in the handler above. | |
1607 | * | 1617 | * | |
1608 | * The fault handler will take care of binding the object into the GTT | 1618 | * The fault handler will take care of binding the object into the GTT | |
1609 | * (since it may have been evicted to make room for something), allocating | 1619 | * (since it may have been evicted to make room for something), allocating | |
1610 | * a fence register, and mapping the appropriate aperture address into | 1620 | * a fence register, and mapping the appropriate aperture address into | |
1611 | * userspace. | 1621 | * userspace. | |
1612 | */ | 1622 | */ | |
1613 | int | 1623 | int | |
1614 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | 1624 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |
1615 | struct drm_file *file) | 1625 | struct drm_file *file) | |
1616 | { | 1626 | { | |
1617 | struct drm_i915_gem_mmap_gtt *args = data; | 1627 | struct drm_i915_gem_mmap_gtt *args = data; | |
1618 | 1628 | |||
1619 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); | 1629 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); | |
1620 | } | 1630 | } | |
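
The round trip described in the comment above looks like this from userspace. A minimal sketch, assuming a GEM handle already created on an open DRM fd; the helper name map_bo_through_gtt is ours, but the ioctl and struct are the stock i915 uAPI from <drm/i915_drm.h>.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: fetch the fake offset, then mmap through it. */
static void *map_bo_through_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == -1)
		return NULL;

	/* arg.offset is the cookie computed above; the fault handler,
	 * not this mmap call, binds the object into the GTT. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, arg.offset);
}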
1621 | 1631 | |||
1622 | /* Immediately discard the backing storage */ | 1632 | /* Immediately discard the backing storage */ | |
1623 | static void | 1633 | static void | |
1624 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) | 1634 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) | |
1625 | { | 1635 | { | |
1626 | struct inode *inode; | 1636 | struct inode *inode; | |
1627 | 1637 | |||
1628 | i915_gem_object_free_mmap_offset(obj); | 1638 | i915_gem_object_free_mmap_offset(obj); | |
1629 | 1639 | |||
1630 | if (obj->base.filp == NULL) | 1640 | if (obj->base.filp == NULL) | |
1631 | return; | 1641 | return; | |
1632 | 1642 | |||
1633 | /* Our goal here is to return as much of the memory as | 1643 | /* Our goal here is to return as much of the memory as | |
1634 | * possible back to the system, since we are called from OOM. | 1644 | * possible back to the system, since we are called from OOM. | |
1635 | * To do this we must instruct the shmfs to drop all of its | 1645 | * To do this we must instruct the shmfs to drop all of its | |
1636 | * backing pages, *now*. | 1646 | * backing pages, *now*. | |
1637 | */ | 1647 | */ | |
1638 | inode = obj->base.filp->f_path.dentry->d_inode; | 1648 | inode = obj->base.filp->f_path.dentry->d_inode; | |
1639 | shmem_truncate_range(inode, 0, (loff_t)-1); | 1649 | shmem_truncate_range(inode, 0, (loff_t)-1); | |
1640 | 1650 | |||
1641 | obj->madv = __I915_MADV_PURGED; | 1651 | obj->madv = __I915_MADV_PURGED; | |
1642 | } | 1652 | } | |
1643 | 1653 | |||
1644 | static inline int | 1654 | static inline int | |
1645 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) | 1655 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) | |
1646 | { | 1656 | { | |
1647 | return obj->madv == I915_MADV_DONTNEED; | 1657 | return obj->madv == I915_MADV_DONTNEED; | |
1648 | } | 1658 | } | |
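
For context on the madv states tested here and truncated above: userspace drives them through the MADVISE ioctl. A hedged sketch (the helper is hypothetical; the ioctl, struct, and I915_MADV_* constants are the stock uAPI):

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static bool mark_purgeable(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv	= I915_MADV_DONTNEED,	/* candidate for truncation */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg) == -1)
		return false;

	/* retained goes false once i915_gem_object_truncate() has run;
	 * the buffer must then be repopulated before reuse. */
	return arg.retained;
}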
1649 | 1659 | |||
1650 | static void | 1660 | static void | |
1651 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | 1661 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | |
1652 | { | 1662 | { | |
1653 | int page_count = obj->base.size / PAGE_SIZE; | 1663 | int page_count = obj->base.size / PAGE_SIZE; | |
1654 | struct scatterlist *sg; | 1664 | struct scatterlist *sg; | |
1655 | int ret, i; | 1665 | int ret, i; | |
1656 | 1666 | |||
1657 | BUG_ON(obj->madv == __I915_MADV_PURGED); | 1667 | BUG_ON(obj->madv == __I915_MADV_PURGED); | |
1658 | 1668 | |||
1659 | ret = i915_gem_object_set_to_cpu_domain(obj, true); | 1669 | ret = i915_gem_object_set_to_cpu_domain(obj, true); | |
1660 | if (ret) { | 1670 | if (ret) { | |
1661 | /* In the event of a disaster, abandon all caches and | 1671 | /* In the event of a disaster, abandon all caches and | |
1662 | * hope for the best. | 1672 | * hope for the best. | |
1663 | */ | 1673 | */ | |
1664 | WARN_ON(ret != -EIO); | 1674 | WARN_ON(ret != -EIO); | |
1665 | i915_gem_clflush_object(obj); | 1675 | i915_gem_clflush_object(obj); | |
1666 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 1676 | obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; | |
1667 | } | 1677 | } | |
1668 | 1678 | |||
1669 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 1679 | if (i915_gem_object_needs_bit17_swizzle(obj)) | |
1670 | i915_gem_object_save_bit_17_swizzle(obj); | 1680 | i915_gem_object_save_bit_17_swizzle(obj); | |
1671 | 1681 | |||
1672 | if (obj->madv == I915_MADV_DONTNEED) | 1682 | if (obj->madv == I915_MADV_DONTNEED) | |
1673 | obj->dirty = 0; | 1683 | obj->dirty = 0; | |
1674 | 1684 | |||
1675 | for_each_sg(obj->pages->sgl, sg, page_count, i) { | 1685 | for_each_sg(obj->pages->sgl, sg, page_count, i) { | |
1676 | struct page *page = sg_page(sg); | 1686 | struct page *page = sg_page(sg); | |
1677 | 1687 | |||
1678 | if (obj->dirty) | 1688 | if (obj->dirty) | |
1679 | set_page_dirty(page); | 1689 | set_page_dirty(page); | |
1680 | 1690 | |||
1681 | if (obj->madv == I915_MADV_WILLNEED) | 1691 | if (obj->madv == I915_MADV_WILLNEED) | |
1682 | mark_page_accessed(page); | 1692 | mark_page_accessed(page); | |
1683 | 1693 | |||
1684 | page_cache_release(page); | 1694 | page_cache_release(page); | |
1685 | } | 1695 | } | |
1686 | obj->dirty = 0; | 1696 | obj->dirty = 0; | |
1687 | 1697 | |||
1688 | sg_free_table(obj->pages); | 1698 | sg_free_table(obj->pages); | |
1689 | kfree(obj->pages); | 1699 | kfree(obj->pages); | |
1690 | } | 1700 | } | |
1691 | 1701 | |||
1692 | static int | 1702 | static int | |
1693 | i915_gem_object_put_pages(struct drm_i915_gem_object *obj) | 1703 | i915_gem_object_put_pages(struct drm_i915_gem_object *obj) | |
1694 | { | 1704 | { | |
1695 | const struct drm_i915_gem_object_ops *ops = obj->ops; | 1705 | const struct drm_i915_gem_object_ops *ops = obj->ops; | |
1696 | 1706 | |||
1697 | if (obj->pages == NULL) | 1707 | if (obj->pages == NULL) | |
1698 | return 0; | 1708 | return 0; | |
1699 | 1709 | |||
1700 | BUG_ON(obj->gtt_space); | 1710 | BUG_ON(obj->gtt_space); | |
1701 | 1711 | |||
1702 | if (obj->pages_pin_count) | 1712 | if (obj->pages_pin_count) | |
1703 | return -EBUSY; | 1713 | return -EBUSY; | |
1704 | 1714 | |||
1705 | /* ->put_pages might need to allocate memory for the bit17 swizzle | 1715 | /* ->put_pages might need to allocate memory for the bit17 swizzle | |
1706 | * array, hence protect them from being reaped by removing them from gtt | 1716 | * array, hence protect them from being reaped by removing them from gtt | |
1707 | * lists early. */ | 1717 | * lists early. */ | |
1708 | list_del(&obj->gtt_list); | 1718 | list_del(&obj->gtt_list); | |
1709 | 1719 | |||
1710 | ops->put_pages(obj); | 1720 | ops->put_pages(obj); | |
1711 | obj->pages = NULL; | 1721 | obj->pages = NULL; | |
1712 | 1722 | |||
1713 | if (i915_gem_object_is_purgeable(obj)) | 1723 | if (i915_gem_object_is_purgeable(obj)) | |
1714 | i915_gem_object_truncate(obj); | 1724 | i915_gem_object_truncate(obj); | |
1715 | 1725 | |||
1716 | return 0; | 1726 | return 0; | |
1717 | } | 1727 | } | |
1718 | 1728 | |||
1719 | static long | 1729 | static long | |
1720 | __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, | 1730 | __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, | |
1721 | bool purgeable_only) | 1731 | bool purgeable_only) | |
1722 | { | 1732 | { | |
1723 | struct drm_i915_gem_object *obj, *next; | 1733 | struct drm_i915_gem_object *obj, *next; | |
1724 | long count = 0; | 1734 | long count = 0; | |
1725 | 1735 | |||
1726 | list_for_each_entry_safe(obj, next, | 1736 | list_for_each_entry_safe(obj, next, | |
1727 | &dev_priv->mm.unbound_list, | 1737 | &dev_priv->mm.unbound_list, | |
1728 | gtt_list) { | 1738 | gtt_list) { | |
1729 | if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && | 1739 | if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && | |
1730 | i915_gem_object_put_pages(obj) == 0) { | 1740 | i915_gem_object_put_pages(obj) == 0) { | |
1731 | count += obj->base.size >> PAGE_SHIFT; | 1741 | count += obj->base.size >> PAGE_SHIFT; | |
1732 | if (count >= target) | 1742 | if (count >= target) | |
1733 | return count; | 1743 | return count; | |
1734 | } | 1744 | } | |
1735 | } | 1745 | } | |
1736 | 1746 | |||
1737 | list_for_each_entry_safe(obj, next, | 1747 | list_for_each_entry_safe(obj, next, | |
1738 | &dev_priv->mm.inactive_list, | 1748 | &dev_priv->mm.inactive_list, | |
1739 | mm_list) { | 1749 | mm_list) { | |
1740 | if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && | 1750 | if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && | |
1741 | i915_gem_object_unbind(obj) == 0 && | 1751 | i915_gem_object_unbind(obj) == 0 && | |
1742 | i915_gem_object_put_pages(obj) == 0) { | 1752 | i915_gem_object_put_pages(obj) == 0) { | |
1743 | count += obj->base.size >> PAGE_SHIFT; | 1753 | count += obj->base.size >> PAGE_SHIFT; | |
1744 | if (count >= target) | 1754 | if (count >= target) | |
1745 | return count; | 1755 | return count; | |
1746 | } | 1756 | } | |
1747 | } | 1757 | } | |
1748 | 1758 | |||
1749 | return count; | 1759 | return count; | |
1750 | } | 1760 | } | |
1751 | 1761 | |||
1752 | static long | 1762 | static long | |
1753 | i915_gem_purge(struct drm_i915_private *dev_priv, long target) | 1763 | i915_gem_purge(struct drm_i915_private *dev_priv, long target) | |
1754 | { | 1764 | { | |
1755 | return __i915_gem_shrink(dev_priv, target, true); | 1765 | return __i915_gem_shrink(dev_priv, target, true); | |
1756 | } | 1766 | } | |
1757 | 1767 | |||
1758 | static void | 1768 | static void | |
1759 | i915_gem_shrink_all(struct drm_i915_private *dev_priv) | 1769 | i915_gem_shrink_all(struct drm_i915_private *dev_priv) | |
1760 | { | 1770 | { | |
1761 | struct drm_i915_gem_object *obj, *next; | 1771 | struct drm_i915_gem_object *obj, *next; | |
1762 | 1772 | |||
1763 | i915_gem_evict_everything(dev_priv->dev); | 1773 | i915_gem_evict_everything(dev_priv->dev); | |
1764 | 1774 | |||
1765 | list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) | 1775 | list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) | |
1766 | i915_gem_object_put_pages(obj); | 1776 | i915_gem_object_put_pages(obj); | |
1767 | } | 1777 | } | |
1768 | 1778 | |||
1769 | static int | 1779 | static int | |
1770 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | 1780 | i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | |
1771 | { | 1781 | { | |
1772 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 1782 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | |
1773 | int page_count, i; | 1783 | int page_count, i; | |
1774 | struct address_space *mapping; | 1784 | struct address_space *mapping; | |
1775 | struct sg_table *st; | 1785 | struct sg_table *st; | |
1776 | struct scatterlist *sg; | 1786 | struct scatterlist *sg; | |
1777 | struct page *page; | 1787 | struct page *page; | |
1778 | gfp_t gfp; | 1788 | gfp_t gfp; | |
1779 | 1789 | |||
1780 | /* Assert that the object is not currently in any GPU domain. As it | 1790 | /* Assert that the object is not currently in any GPU domain. As it | |
1781 | * wasn't in the GTT, there shouldn't be any way it could have been in | 1791 | * wasn't in the GTT, there shouldn't be any way it could have been in | |
1782 | * a GPU cache. | 1792 | * a GPU cache. | |
1783 | */ | 1793 | */ | |
1784 | BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); | 1794 | BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); | |
1785 | BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); | 1795 | BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); | |
1786 | 1796 | |||
1787 | st = kmalloc(sizeof(*st), GFP_KERNEL); | 1797 | st = kmalloc(sizeof(*st), GFP_KERNEL); | |
1788 | if (st == NULL) | 1798 | if (st == NULL) | |
1789 | return -ENOMEM; | 1799 | return -ENOMEM; | |
1790 | 1800 | |||
1791 | page_count = obj->base.size / PAGE_SIZE; | 1801 | page_count = obj->base.size / PAGE_SIZE; | |
1792 | if (sg_alloc_table(st, page_count, GFP_KERNEL)) { | 1802 | if (sg_alloc_table(st, page_count, GFP_KERNEL)) { | |
1793 | sg_free_table(st); | 1803 | sg_free_table(st); | |
1794 | kfree(st); | 1804 | kfree(st); | |
1795 | return -ENOMEM; | 1805 | return -ENOMEM; | |
1796 | } | 1806 | } | |
1797 | 1807 | |||
1798 | /* Get the list of pages out of our struct file. They'll be pinned | 1808 | /* Get the list of pages out of our struct file. They'll be pinned | |
1799 | * at this point until we release them. | 1809 | * at this point until we release them. | |
1800 | * | 1810 | * | |
1801 | * Fail silently without starting the shrinker. | 1811 | * Fail silently without starting the shrinker. | |
1802 | */ | 1812 | */ | |
1803 | mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | 1813 | mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; | |
1804 | gfp = mapping_gfp_mask(mapping); | 1814 | gfp = mapping_gfp_mask(mapping); | |
1805 | gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; | 1815 | gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; | |
1806 | gfp &= ~(__GFP_IO | __GFP_WAIT); | 1816 | gfp &= ~(__GFP_IO | __GFP_WAIT); | |
1807 | for_each_sg(st->sgl, sg, page_count, i) { | 1817 | for_each_sg(st->sgl, sg, page_count, i) { | |
1808 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | 1818 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | |
1809 | if (IS_ERR(page)) { | 1819 | if (IS_ERR(page)) { | |
1810 | i915_gem_purge(dev_priv, page_count); | 1820 | i915_gem_purge(dev_priv, page_count); | |
1811 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | 1821 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | |
1812 | } | 1822 | } | |
1813 | if (IS_ERR(page)) { | 1823 | if (IS_ERR(page)) { | |
1814 | /* We've tried hard to allocate the memory by reaping | 1824 | /* We've tried hard to allocate the memory by reaping | |
1815 | * our own buffers; now let the real VM do its job and | 1825 | * our own buffers; now let the real VM do its job and | |
1816 | * go down in flames if we are truly OOM. | 1826 | * go down in flames if we are truly OOM. | |
1817 | */ | 1827 | */ | |
1818 | gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD); | 1828 | gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD); | |
1819 | gfp |= __GFP_IO | __GFP_WAIT; | 1829 | gfp |= __GFP_IO | __GFP_WAIT; | |
1820 | 1830 | |||
1821 | i915_gem_shrink_all(dev_priv); | 1831 | i915_gem_shrink_all(dev_priv); | |
1822 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | 1832 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | |
1823 | if (IS_ERR(page)) | 1833 | if (IS_ERR(page)) | |
1824 | goto err_pages; | 1834 | goto err_pages; | |
1825 | 1835 | |||
1826 | gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; | 1836 | gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; | |
1827 | gfp &= ~(__GFP_IO | __GFP_WAIT); | 1837 | gfp &= ~(__GFP_IO | __GFP_WAIT); | |
1828 | } | 1838 | } | |
1829 | 1839 | |||
1830 | sg_set_page(sg, page, PAGE_SIZE, 0); | 1840 | sg_set_page(sg, page, PAGE_SIZE, 0); | |
1831 | } | 1841 | } | |
1832 | 1842 | |||
1833 | obj->pages = st; | 1843 | obj->pages = st; | |
1834 | 1844 | |||
1835 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 1845 | if (i915_gem_object_needs_bit17_swizzle(obj)) | |
1836 | i915_gem_object_do_bit_17_swizzle(obj); | 1846 | i915_gem_object_do_bit_17_swizzle(obj); | |
1837 | 1847 | |||
1838 | return 0; | 1848 | return 0; | |
1839 | 1849 | |||
1840 | err_pages: | 1850 | err_pages: | |
1841 | for_each_sg(st->sgl, sg, i, page_count) | 1851 | for_each_sg(st->sgl, sg, i, page_count) | |
1842 | page_cache_release(sg_page(sg)); | 1852 | page_cache_release(sg_page(sg)); | |
1843 | sg_free_table(st); | 1853 | sg_free_table(st); | |
1844 | kfree(st); | 1854 | kfree(st); | |
1845 | return PTR_ERR(page); | 1855 | return PTR_ERR(page); | |
1846 | } | 1856 | } | |
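
The loop above climbs a three-rung allocation ladder: a cheap non-blocking attempt, a retry after purging our own purgeable objects, and finally full direct reclaim after evicting everything. Restated as a hedged sketch (read_page_with_fallback is a hypothetical refactoring of the loop body, not a function in this file):

static struct page *
read_page_with_fallback(struct drm_i915_private *dev_priv,
    struct address_space *mapping, pgoff_t n, unsigned page_count,
    gfp_t gfp)
{
	struct page *page;

	/* Rung 1: cheap and quiet; no I/O, no waiting, no kswapd. */
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	page = shmem_read_mapping_page_gfp(mapping, n, gfp);
	if (!IS_ERR(page))
		return page;

	/* Rung 2: reap our own purgeable buffers, retry cheaply. */
	i915_gem_purge(dev_priv, page_count);
	page = shmem_read_mapping_page_gfp(mapping, n, gfp);
	if (!IS_ERR(page))
		return page;

	/* Rung 3: evict everything, then let the VM block and swap. */
	i915_gem_shrink_all(dev_priv);
	gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
	gfp |= __GFP_IO | __GFP_WAIT;
	return shmem_read_mapping_page_gfp(mapping, n, gfp);
}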
1847 | 1857 | |||
1848 | /* Ensure that the associated pages are gathered from the backing storage | 1858 | /* Ensure that the associated pages are gathered from the backing storage | |
1849 | * and pinned into our object. i915_gem_object_get_pages() may be called | 1859 | * and pinned into our object. i915_gem_object_get_pages() may be called | |
1850 | * multiple times before they are released by a single call to | 1860 | * multiple times before they are released by a single call to | |
1851 | * i915_gem_object_put_pages() - once the pages are no longer referenced | 1861 | * i915_gem_object_put_pages() - once the pages are no longer referenced | |
1852 | * either as a result of memory pressure (reaping pages under the shrinker) | 1862 | * either as a result of memory pressure (reaping pages under the shrinker) | |
1853 | * or as the object is itself released. | 1863 | * or as the object is itself released. | |
1854 | */ | 1864 | */ | |
1855 | int | 1865 | int | |
1856 | i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | 1866 | i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | |
1857 | { | 1867 | { | |
1858 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 1868 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | |
1859 | const struct drm_i915_gem_object_ops *ops = obj->ops; | 1869 | const struct drm_i915_gem_object_ops *ops = obj->ops; | |
1860 | int ret; | 1870 | int ret; | |
1861 | 1871 | |||
1862 | if (obj->pages) | 1872 | if (obj->pages) | |
1863 | return 0; | 1873 | return 0; | |
1864 | 1874 | |||
1865 | BUG_ON(obj->pages_pin_count); | 1875 | BUG_ON(obj->pages_pin_count); | |
1866 | 1876 | |||
1867 | ret = ops->get_pages(obj); | 1877 | ret = ops->get_pages(obj); | |
1868 | if (ret) | 1878 | if (ret) | |
1869 | return ret; | 1879 | return ret; | |
1870 | 1880 | |||
1871 | list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); | 1881 | list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); | |
1872 | return 0; | 1882 | return 0; | |
1873 | } | 1883 | } | |
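
The comment above names the get/put pairing; in practice callers also hold a pin across any direct use of obj->pages, which is what makes the -EBUSY check in i915_gem_object_put_pages() meaningful. A hedged sketch of the contract (use_pages_safely is hypothetical; the pin helpers are the trivial pages_pin_count inlines from i915_drv.h):

static int use_pages_safely(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;
	i915_gem_object_pin_pages(obj);		/* pages_pin_count++ */

	/* ... walk obj->pages->sgl; a concurrent shrinker call to
	 * i915_gem_object_put_pages() now bails with -EBUSY ... */

	i915_gem_object_unpin_pages(obj);	/* pages_pin_count-- */
	return 0;
}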
1874 | 1884 | |||
1875 | void | 1885 | void | |
1876 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | 1886 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |
1877 | struct intel_ring_buffer *ring) | 1887 | struct intel_ring_buffer *ring) | |
1878 | { | 1888 | { | |
1879 | struct drm_device *dev = obj->base.dev; | 1889 | struct drm_device *dev = obj->base.dev; | |
1880 | struct drm_i915_private *dev_priv = dev->dev_private; | 1890 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1881 | u32 seqno = intel_ring_get_seqno(ring); | 1891 | u32 seqno = intel_ring_get_seqno(ring); | |
1882 | 1892 | |||
1883 | BUG_ON(ring == NULL); | 1893 | BUG_ON(ring == NULL); | |
1884 | obj->ring = ring; | 1894 | obj->ring = ring; | |
1885 | 1895 | |||
1886 | /* Add a reference if we're newly entering the active list. */ | 1896 | /* Add a reference if we're newly entering the active list. */ | |
1887 | if (!obj->active) { | 1897 | if (!obj->active) { | |
1888 | drm_gem_object_reference(&obj->base); | 1898 | drm_gem_object_reference(&obj->base); | |
1889 | obj->active = 1; | 1899 | obj->active = 1; | |
1890 | } | 1900 | } | |
1891 | 1901 | |||
1892 | /* Move from whatever list we were on to the tail of execution. */ | 1902 | /* Move from whatever list we were on to the tail of execution. */ | |
1893 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); | 1903 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); | |
1894 | list_move_tail(&obj->ring_list, &ring->active_list); | 1904 | list_move_tail(&obj->ring_list, &ring->active_list); | |
1895 | 1905 | |||
1896 | obj->last_read_seqno = seqno; | 1906 | obj->last_read_seqno = seqno; | |
1897 | 1907 | |||
1898 | if (obj->fenced_gpu_access) { | 1908 | if (obj->fenced_gpu_access) { | |
1899 | obj->last_fenced_seqno = seqno; | 1909 | obj->last_fenced_seqno = seqno; | |
1900 | 1910 | |||
1901 | /* Bump MRU to take account of the delayed flush */ | 1911 | /* Bump MRU to take account of the delayed flush */ | |
1902 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | 1912 | if (obj->fence_reg != I915_FENCE_REG_NONE) { | |
1903 | struct drm_i915_fence_reg *reg; | 1913 | struct drm_i915_fence_reg *reg; | |
1904 | 1914 | |||
1905 | reg = &dev_priv->fence_regs[obj->fence_reg]; | 1915 | reg = &dev_priv->fence_regs[obj->fence_reg]; | |
1906 | list_move_tail(®->lru_list, | 1916 | list_move_tail(®->lru_list, | |
1907 | &dev_priv->mm.fence_list); | 1917 | &dev_priv->mm.fence_list); | |
1908 | } | 1918 | } | |
1909 | } | 1919 | } | |
1910 | } | 1920 | } | |
1911 | 1921 | |||
1912 | static void | 1922 | static void | |
1913 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | 1923 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | |
1914 | { | 1924 | { | |
1915 | struct drm_device *dev = obj->base.dev; | 1925 | struct drm_device *dev = obj->base.dev; | |
1916 | struct drm_i915_private *dev_priv = dev->dev_private; | 1926 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1917 | 1927 | |||
1918 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | 1928 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | |
1919 | BUG_ON(!obj->active); | 1929 | BUG_ON(!obj->active); | |
1920 | 1930 | |||
1921 | if (obj->pin_count) /* are we a framebuffer? */ | 1931 | if (obj->pin_count) /* are we a framebuffer? */ | |
1922 | intel_mark_fb_idle(obj); | 1932 | intel_mark_fb_idle(obj); | |
1923 | 1933 | |||
1924 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 1934 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | |
1925 | 1935 | |||
1926 | list_del_init(&obj->ring_list); | 1936 | list_del_init(&obj->ring_list); | |
1927 | obj->ring = NULL; | 1937 | obj->ring = NULL; | |
1928 | 1938 | |||
1929 | obj->last_read_seqno = 0; | 1939 | obj->last_read_seqno = 0; | |
1930 | obj->last_write_seqno = 0; | 1940 | obj->last_write_seqno = 0; | |
1931 | obj->base.write_domain = 0; | 1941 | obj->base.write_domain = 0; | |
1932 | 1942 | |||
1933 | obj->last_fenced_seqno = 0; | 1943 | obj->last_fenced_seqno = 0; | |
1934 | obj->fenced_gpu_access = false; | 1944 | obj->fenced_gpu_access = false; | |
1935 | 1945 | |||
1936 | obj->active = 0; | 1946 | obj->active = 0; | |
1937 | drm_gem_object_unreference(&obj->base); | 1947 | drm_gem_object_unreference(&obj->base); | |
1938 | 1948 | |||
1939 | WARN_ON(i915_verify_lists(dev)); | 1949 | WARN_ON(i915_verify_lists(dev)); | |
1940 | } | 1950 | } | |
1941 | 1951 | |||
1942 | static int | 1952 | static int | |
1943 | i915_gem_handle_seqno_wrap(struct drm_device *dev) | 1953 | i915_gem_handle_seqno_wrap(struct drm_device *dev) | |
1944 | { | 1954 | { | |
1945 | struct drm_i915_private *dev_priv = dev->dev_private; | 1955 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1946 | struct intel_ring_buffer *ring; | 1956 | struct intel_ring_buffer *ring; | |
1947 | int ret, i, j; | 1957 | int ret, i, j; | |
1948 | 1958 | |||
1949 | /* The hardware uses various monotonic 32-bit counters; if we | 1959 | /* The hardware uses various monotonic 32-bit counters; if we | |
1950 | * detect that they will wrap around, we need to idle the GPU | 1960 | * detect that they will wrap around, we need to idle the GPU | |
1951 | * and reset those counters. | 1961 | * and reset those counters. | |
1952 | */ | 1962 | */ | |
1953 | ret = 0; | 1963 | ret = 0; | |
1954 | for_each_ring(ring, dev_priv, i) { | 1964 | for_each_ring(ring, dev_priv, i) { | |
1955 | for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) | 1965 | for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) | |
1956 | ret |= ring->sync_seqno[j] != 0; | 1966 | ret |= ring->sync_seqno[j] != 0; | |
1957 | } | 1967 | } | |
1958 | if (ret == 0) | 1968 | if (ret == 0) | |
1959 | return ret; | 1969 | return ret; | |
1960 | 1970 | |||
1961 | ret = i915_gpu_idle(dev); | 1971 | ret = i915_gpu_idle(dev); | |
1962 | if (ret) | 1972 | if (ret) | |
1963 | return ret; | 1973 | return ret; | |
1964 | 1974 | |||
1965 | i915_gem_retire_requests(dev); | 1975 | i915_gem_retire_requests(dev); | |
1966 | for_each_ring(ring, dev_priv, i) { | 1976 | for_each_ring(ring, dev_priv, i) { | |
1967 | for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) | 1977 | for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) | |
1968 | ring->sync_seqno[j] = 0; | 1978 | ring->sync_seqno[j] = 0; | |
1969 | } | 1979 | } | |
1970 | 1980 | |||
1971 | return 0; | 1981 | return 0; | |
1972 | } | 1982 | } | |
1973 | 1983 | |||
1974 | int | 1984 | int | |
1975 | i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) | 1985 | i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) | |
1976 | { | 1986 | { | |
1977 | struct drm_i915_private *dev_priv = dev->dev_private; | 1987 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1978 | 1988 | |||
1979 | /* reserve 0 for non-seqno */ | 1989 | /* reserve 0 for non-seqno */ | |
1980 | if (dev_priv->next_seqno == 0) { | 1990 | if (dev_priv->next_seqno == 0) { | |
1981 | int ret = i915_gem_handle_seqno_wrap(dev); | 1991 | int ret = i915_gem_handle_seqno_wrap(dev); | |
1982 | if (ret) | 1992 | if (ret) | |
1983 | return ret; | 1993 | return ret; | |
1984 | 1994 | |||
1985 | dev_priv->next_seqno = 1; | 1995 | dev_priv->next_seqno = 1; | |
1986 | } | 1996 | } | |
1987 | 1997 | |||
1988 | *seqno = dev_priv->next_seqno++; | 1998 | *seqno = dev_priv->next_seqno++; | |
1989 | return 0; | 1999 | return 0; | |
1990 | } | 2000 | } | |
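
Reserving 0 works because seqnos are compared with wraparound-safe signed arithmetic rather than a plain less-than. The comparison helper, reproduced here (modulo type spellings) from i915_drv.h, with a worked example:

static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* Example across the wrap handled above: seq1 = 0x00000002,
 * seq2 = 0xfffffffe gives (int32_t)0x00000004 == 4 >= 0, so the
 * post-wrap seqno correctly "passes" the pre-wrap one, provided no
 * two live seqnos are ever more than 2^31 apart. */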
1991 | 2001 | |||
1992 | int | 2002 | int | |
1993 | i915_add_request(struct intel_ring_buffer *ring, | 2003 | i915_add_request(struct intel_ring_buffer *ring, | |
1994 | struct drm_file *file, | 2004 | struct drm_file *file, | |
1995 | u32 *out_seqno) | 2005 | u32 *out_seqno) | |
1996 | { | 2006 | { | |
1997 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 2007 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | |
1998 | struct drm_i915_gem_request *request; | 2008 | struct drm_i915_gem_request *request; | |
1999 | u32 request_ring_position; | 2009 | u32 request_ring_position; | |
2000 | int was_empty; | 2010 | int was_empty; | |
2001 | int ret; | 2011 | int ret; | |
2002 | 2012 | |||
2003 | /* | 2013 | /* | |
2004 | * Emit any outstanding flushes - execbuf can fail to emit the flush | 2014 | * Emit any outstanding flushes - execbuf can fail to emit the flush | |
2005 | * after having emitted the batchbuffer command. Hence we need to fix | 2015 | * after having emitted the batchbuffer command. Hence we need to fix | |
2006 | * things up similarly to emitting the lazy request. The difference here | 2016 | * things up similarly to emitting the lazy request. The difference here | |
2007 | * is that the flush _must_ happen before the next request, no matter | 2017 | * is that the flush _must_ happen before the next request, no matter | |
2008 | * what. | 2018 | * what. | |
2009 | */ | 2019 | */ | |
2010 | ret = intel_ring_flush_all_caches(ring); | 2020 | ret = intel_ring_flush_all_caches(ring); | |
2011 | if (ret) | 2021 | if (ret) | |
2012 | return ret; | 2022 | return ret; | |
2013 | 2023 | |||
2014 | request = kmalloc(sizeof(*request), GFP_KERNEL); | 2024 | request = kmalloc(sizeof(*request), GFP_KERNEL); | |
2015 | if (request == NULL) | 2025 | if (request == NULL) | |
2016 | return -ENOMEM; | 2026 | return -ENOMEM; | |
2017 | 2027 | |||
2018 | 2028 | |||
2019 | /* Record the position of the start of the request so that, | 2029 | /* Record the position of the start of the request so that, | |
2020 | * should we detect the updated seqno part-way through the | 2030 | * should we detect the updated seqno part-way through the | |
2021 | * GPU's processing of the request, we never over-estimate | 2031 | * GPU's processing of the request, we never over-estimate | |
2022 | * the position of the head. | 2032 | * the position of the head. | |
2023 | */ | 2033 | */ | |
2024 | request_ring_position = intel_ring_get_tail(ring); | 2034 | request_ring_position = intel_ring_get_tail(ring); | |
2025 | 2035 | |||
2026 | ret = ring->add_request(ring); | 2036 | ret = ring->add_request(ring); | |
2027 | if (ret) { | 2037 | if (ret) { | |
2028 | kfree(request); | 2038 | kfree(request); | |
2029 | return ret; | 2039 | return ret; | |
2030 | } | 2040 | } | |
2031 | 2041 | |||
2032 | request->seqno = intel_ring_get_seqno(ring); | 2042 | request->seqno = intel_ring_get_seqno(ring); | |
2033 | request->ring = ring; | 2043 | request->ring = ring; | |
2034 | request->tail = request_ring_position; | 2044 | request->tail = request_ring_position; | |
2035 | request->emitted_jiffies = jiffies; | 2045 | request->emitted_jiffies = jiffies; | |
2036 | was_empty = list_empty(&ring->request_list); | 2046 | was_empty = list_empty(&ring->request_list); | |
2037 | list_add_tail(&request->list, &ring->request_list); | 2047 | list_add_tail(&request->list, &ring->request_list); |
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_irq.c 2013/07/23 21:28:22 1.1.1.1.2.2
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_irq.c 2013/07/24 03:06:00 1.1.1.1.2.3
@@ -1,2678 +1,2700 @@ | @@ -1,2678 +1,2700 @@ | |||
1 | /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- | 1 | /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- | |
2 | */ | 2 | */ | |
3 | /* | 3 | /* | |
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | 4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. | |
5 | * All Rights Reserved. | 5 | * All Rights Reserved. | |
6 | * | 6 | * | |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | 7 | * Permission is hereby granted, free of charge, to any person obtaining a | |
8 | * copy of this software and associated documentation files (the | 8 | * copy of this software and associated documentation files (the | |
9 | * "Software"), to deal in the Software without restriction, including | 9 | * "Software"), to deal in the Software without restriction, including | |
10 | * without limitation the rights to use, copy, modify, merge, publish, | 10 | * without limitation the rights to use, copy, modify, merge, publish, | |
11 | * distribute, sub license, and/or sell copies of the Software, and to | 11 | * distribute, sub license, and/or sell copies of the Software, and to | |
12 | * permit persons to whom the Software is furnished to do so, subject to | 12 | * permit persons to whom the Software is furnished to do so, subject to | |
13 | * the following conditions: | 13 | * the following conditions: | |
14 | * | 14 | * | |
15 | * The above copyright notice and this permission notice (including the | 15 | * The above copyright notice and this permission notice (including the | |
16 | * next paragraph) shall be included in all copies or substantial portions | 16 | * next paragraph) shall be included in all copies or substantial portions | |
17 | * of the Software. | 17 | * of the Software. | |
18 | * | 18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | 21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | |
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | 22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | |
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | 23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | 24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
26 | * | 26 | * | |
27 | */ | 27 | */ | |
28 | 28 | |||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
30 | 30 | |||
31 | #include <linux/sysrq.h> | 31 | #include <linux/sysrq.h> | |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> | |
33 | #include <drm/drmP.h> | 33 | #include <drm/drmP.h> | |
34 | #include <drm/i915_drm.h> | 34 | #include <drm/i915_drm.h> | |
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" | |
36 | #include "i915_trace.h" | 36 | #include "i915_trace.h" | |
37 | #include "intel_drv.h" | 37 | #include "intel_drv.h" | |
38 | 38 | |||
39 | /* For display hotplug interrupt */ | 39 | /* For display hotplug interrupt */ | |
40 | static void | 40 | static void | |
41 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 41 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | |
42 | { | 42 | { | |
43 | if ((dev_priv->irq_mask & mask) != 0) { | 43 | if ((dev_priv->irq_mask & mask) != 0) { | |
44 | dev_priv->irq_mask &= ~mask; | 44 | dev_priv->irq_mask &= ~mask; | |
45 | I915_WRITE(DEIMR, dev_priv->irq_mask); | 45 | I915_WRITE(DEIMR, dev_priv->irq_mask); | |
46 | POSTING_READ(DEIMR); | 46 | POSTING_READ(DEIMR); | |
47 | } | 47 | } | |
48 | } | 48 | } | |
49 | 49 | |||
50 | static inline void | 50 | static inline void | |
51 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | 51 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) | |
52 | { | 52 | { | |
53 | if ((dev_priv->irq_mask & mask) != mask) { | 53 | if ((dev_priv->irq_mask & mask) != mask) { | |
54 | dev_priv->irq_mask |= mask; | 54 | dev_priv->irq_mask |= mask; | |
55 | I915_WRITE(DEIMR, dev_priv->irq_mask); | 55 | I915_WRITE(DEIMR, dev_priv->irq_mask); | |
56 | POSTING_READ(DEIMR); | 56 | POSTING_READ(DEIMR); | |
57 | } | 57 | } | |
58 | } | 58 | } | |
59 | 59 | |||
60 | void | 60 | void | |
61 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | 61 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | |
62 | { | 62 | { | |
63 | if ((dev_priv->pipestat[pipe] & mask) != mask) { | 63 | if ((dev_priv->pipestat[pipe] & mask) != mask) { | |
64 | u32 reg = PIPESTAT(pipe); | 64 | u32 reg = PIPESTAT(pipe); | |
65 | 65 | |||
66 | dev_priv->pipestat[pipe] |= mask; | 66 | dev_priv->pipestat[pipe] |= mask; | |
67 | /* Enable the interrupt, clear any pending status */ | 67 | /* Enable the interrupt, clear any pending status */ | |
68 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); | 68 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); | |
69 | POSTING_READ(reg); | 69 | POSTING_READ(reg); | |
70 | } | 70 | } | |
71 | } | 71 | } | |
72 | 72 | |||
73 | void | 73 | void | |
74 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | 74 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) | |
75 | { | 75 | { | |
76 | if ((dev_priv->pipestat[pipe] & mask) != 0) { | 76 | if ((dev_priv->pipestat[pipe] & mask) != 0) { | |
77 | u32 reg = PIPESTAT(pipe); | 77 | u32 reg = PIPESTAT(pipe); | |
78 | 78 | |||
79 | dev_priv->pipestat[pipe] &= ~mask; | 79 | dev_priv->pipestat[pipe] &= ~mask; | |
80 | I915_WRITE(reg, dev_priv->pipestat[pipe]); | 80 | I915_WRITE(reg, dev_priv->pipestat[pipe]); | |
81 | POSTING_READ(reg); | 81 | POSTING_READ(reg); | |
82 | } | 82 | } | |
83 | } | 83 | } | |
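
Both helpers lean on the PIPESTAT register layout: bits 31:16 are interrupt enables and bits 15:0 the matching write-1-to-clear status bits, each status bit sitting exactly 16 below its enable. Hence the mask >> 16 in the enable path, which acks any stale status in the same write. Illustrated with a hypothetical bit pair:

#define EXAMPLE_EVENT_ENABLE	(1u << 20)			/* hypothetical */
#define EXAMPLE_EVENT_STATUS	(EXAMPLE_EVENT_ENABLE >> 16)	/* 1u << 4 */

/* i915_enable_pipestat(dev_priv, pipe, EXAMPLE_EVENT_ENABLE) writes
 * pipestat | EXAMPLE_EVENT_STATUS: a single register write both arms
 * the interrupt and clears any status latched while it was disabled. */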
84 | 84 | |||
85 | /** | 85 | /** | |
86 | * intel_enable_asle - enable ASLE interrupt for OpRegion | 86 | * intel_enable_asle - enable ASLE interrupt for OpRegion | |
87 | */ | 87 | */ | |
88 | void intel_enable_asle(struct drm_device *dev) | 88 | void intel_enable_asle(struct drm_device *dev) | |
89 | { | 89 | { | |
90 | drm_i915_private_t *dev_priv = dev->dev_private; | 90 | drm_i915_private_t *dev_priv = dev->dev_private; | |
91 | unsigned long irqflags; | 91 | unsigned long irqflags; | |
92 | 92 | |||
93 | /* FIXME: opregion/asle for VLV */ | 93 | /* FIXME: opregion/asle for VLV */ | |
94 | if (IS_VALLEYVIEW(dev)) | 94 | if (IS_VALLEYVIEW(dev)) | |
95 | return; | 95 | return; | |
96 | 96 | |||
97 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 97 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
98 | 98 | |||
99 | if (HAS_PCH_SPLIT(dev)) | 99 | if (HAS_PCH_SPLIT(dev)) | |
100 | ironlake_enable_display_irq(dev_priv, DE_GSE); | 100 | ironlake_enable_display_irq(dev_priv, DE_GSE); | |
101 | else { | 101 | else { | |
102 | i915_enable_pipestat(dev_priv, 1, | 102 | i915_enable_pipestat(dev_priv, 1, | |
103 | PIPE_LEGACY_BLC_EVENT_ENABLE); | 103 | PIPE_LEGACY_BLC_EVENT_ENABLE); | |
104 | if (INTEL_INFO(dev)->gen >= 4) | 104 | if (INTEL_INFO(dev)->gen >= 4) | |
105 | i915_enable_pipestat(dev_priv, 0, | 105 | i915_enable_pipestat(dev_priv, 0, | |
106 | PIPE_LEGACY_BLC_EVENT_ENABLE); | 106 | PIPE_LEGACY_BLC_EVENT_ENABLE); | |
107 | } | 107 | } | |
108 | 108 | |||
109 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 109 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
110 | } | 110 | } | |
111 | 111 | |||
112 | /** | 112 | /** | |
113 | * i915_pipe_enabled - check if a pipe is enabled | 113 | * i915_pipe_enabled - check if a pipe is enabled | |
114 | * @dev: DRM device | 114 | * @dev: DRM device | |
115 | * @pipe: pipe to check | 115 | * @pipe: pipe to check | |
116 | * | 116 | * | |
117 | * Reading certain registers when the pipe is disabled can hang the chip. | 117 | * Reading certain registers when the pipe is disabled can hang the chip. | |
118 | * Use this routine to make sure the PLL is running and the pipe is active | 118 | * Use this routine to make sure the PLL is running and the pipe is active | |
119 | * before reading such registers if unsure. | 119 | * before reading such registers if unsure. | |
120 | */ | 120 | */ | |
121 | static int | 121 | static int | |
122 | i915_pipe_enabled(struct drm_device *dev, int pipe) | 122 | i915_pipe_enabled(struct drm_device *dev, int pipe) | |
123 | { | 123 | { | |
124 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 124 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
125 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 125 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | |
126 | pipe); | 126 | pipe); | |
127 | 127 | |||
128 | return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; | 128 | return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; | |
129 | } | 129 | } | |
130 | 130 | |||
131 | /* Called from drm generic code, passed a 'crtc', which | 131 | /* Called from drm generic code, passed a 'crtc', which | |
132 | * we use as a pipe index | 132 | * we use as a pipe index | |
133 | */ | 133 | */ | |
134 | static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | 134 | static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |
135 | { | 135 | { | |
136 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 136 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
137 | unsigned long high_frame; | 137 | unsigned long high_frame; | |
138 | unsigned long low_frame; | 138 | unsigned long low_frame; | |
139 | u32 high1, high2, low; | 139 | u32 high1, high2, low; | |
140 | 140 | |||
141 | if (!i915_pipe_enabled(dev, pipe)) { | 141 | if (!i915_pipe_enabled(dev, pipe)) { | |
142 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 142 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | |
143 | "pipe %c\n", pipe_name(pipe)); | 143 | "pipe %c\n", pipe_name(pipe)); | |
144 | return 0; | 144 | return 0; | |
145 | } | 145 | } | |
146 | 146 | |||
147 | high_frame = PIPEFRAME(pipe); | 147 | high_frame = PIPEFRAME(pipe); | |
148 | low_frame = PIPEFRAMEPIXEL(pipe); | 148 | low_frame = PIPEFRAMEPIXEL(pipe); | |
149 | 149 | |||
150 | /* | 150 | /* | |
151 | * High & low register fields aren't synchronized, so make sure | 151 | * High & low register fields aren't synchronized, so make sure | |
152 | * we get a low value that's stable across two reads of the high | 152 | * we get a low value that's stable across two reads of the high | |
153 | * register. | 153 | * register. | |
154 | */ | 154 | */ | |
155 | do { | 155 | do { | |
156 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; | 156 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; | |
157 | low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; | 157 | low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; | |
158 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; | 158 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; | |
159 | } while (high1 != high2); | 159 | } while (high1 != high2); | |
160 | 160 | |||
161 | high1 >>= PIPE_FRAME_HIGH_SHIFT; | 161 | high1 >>= PIPE_FRAME_HIGH_SHIFT; | |
162 | low >>= PIPE_FRAME_LOW_SHIFT; | 162 | low >>= PIPE_FRAME_LOW_SHIFT; | |
163 | return (high1 << 8) | low; | 163 | return (high1 << 8) | low; | |
164 | } | 164 | } | |
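
The retry loop is the standard idiom for reading a counter whose halves cannot be latched in one access: if the high half changed while we read the low half, the low half rolled over mid-read and must be resampled. In isolation (a generic sketch, not driver code):

static uint32_t
read_split_counter(volatile const uint32_t *high,
    volatile const uint32_t *low)
{
	uint32_t h1, h2, l;

	do {
		h1 = *high;
		l  = *low;
		h2 = *high;
	} while (h1 != h2);	/* low rolled over between reads: retry */

	return (h1 << 8) | l;	/* matches the 8-bit low frame field above */
}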
165 | 165 | |||
166 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | 166 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | |
167 | { | 167 | { | |
168 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 168 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
169 | int reg = PIPE_FRMCOUNT_GM45(pipe); | 169 | int reg = PIPE_FRMCOUNT_GM45(pipe); | |
170 | 170 | |||
171 | if (!i915_pipe_enabled(dev, pipe)) { | 171 | if (!i915_pipe_enabled(dev, pipe)) { | |
172 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 172 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | |
173 | "pipe %c\n", pipe_name(pipe)); | 173 | "pipe %c\n", pipe_name(pipe)); | |
174 | return 0; | 174 | return 0; | |
175 | } | 175 | } | |
176 | 176 | |||
177 | return I915_READ(reg); | 177 | return I915_READ(reg); | |
178 | } | 178 | } | |
179 | 179 | |||
180 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | 180 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |
181 | int *vpos, int *hpos) | 181 | int *vpos, int *hpos) | |
182 | { | 182 | { | |
183 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 183 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
184 | u32 vbl = 0, position = 0; | 184 | u32 vbl = 0, position = 0; | |
185 | int vbl_start, vbl_end, htotal, vtotal; | 185 | int vbl_start, vbl_end, htotal, vtotal; | |
186 | bool in_vbl = true; | 186 | bool in_vbl = true; | |
187 | int ret = 0; | 187 | int ret = 0; | |
188 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 188 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | |
189 | pipe); | 189 | pipe); | |
190 | 190 | |||
191 | if (!i915_pipe_enabled(dev, pipe)) { | 191 | if (!i915_pipe_enabled(dev, pipe)) { | |
192 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | 192 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | |
193 | "pipe %c\n", pipe_name(pipe)); | 193 | "pipe %c\n", pipe_name(pipe)); | |
194 | return 0; | 194 | return 0; | |
195 | } | 195 | } | |
196 | 196 | |||
197 | /* Get vtotal. */ | 197 | /* Get vtotal. */ | |
198 | vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); | 198 | vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); | |
199 | 199 | |||
200 | if (INTEL_INFO(dev)->gen >= 4) { | 200 | if (INTEL_INFO(dev)->gen >= 4) { | |
201 | /* No obvious pixelcount register. Only query vertical | 201 | /* No obvious pixelcount register. Only query vertical | |
202 | * scanout position from Display scan line register. | 202 | * scanout position from Display scan line register. | |
203 | */ | 203 | */ | |
204 | position = I915_READ(PIPEDSL(pipe)); | 204 | position = I915_READ(PIPEDSL(pipe)); | |
205 | 205 | |||
206 | /* Decode into vertical scanout position. Don't have | 206 | /* Decode into vertical scanout position. Don't have | |
207 | * horizontal scanout position. | 207 | * horizontal scanout position. | |
208 | */ | 208 | */ | |
209 | *vpos = position & 0x1fff; | 209 | *vpos = position & 0x1fff; | |
210 | *hpos = 0; | 210 | *hpos = 0; | |
211 | } else { | 211 | } else { | |
212 | /* Have access to pixelcount since start of frame. | 212 | /* Have access to pixelcount since start of frame. | |
213 | * We can split this into vertical and horizontal | 213 | * We can split this into vertical and horizontal | |
214 | * scanout position. | 214 | * scanout position. | |
215 | */ | 215 | */ | |
216 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; | 216 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; | |
217 | 217 | |||
218 | htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); | 218 | htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); | |
219 | *vpos = position / htotal; | 219 | *vpos = position / htotal; | |
220 | *hpos = position - (*vpos * htotal); | 220 | *hpos = position - (*vpos * htotal); | |
221 | } | 221 | } | |
222 | 222 | |||
223 | /* Query vblank area. */ | 223 | /* Query vblank area. */ | |
224 | vbl = I915_READ(VBLANK(cpu_transcoder)); | 224 | vbl = I915_READ(VBLANK(cpu_transcoder)); | |
225 | 225 | |||
226 | /* Test position against vblank region. */ | 226 | /* Test position against vblank region. */ | |
227 | vbl_start = vbl & 0x1fff; | 227 | vbl_start = vbl & 0x1fff; | |
228 | vbl_end = (vbl >> 16) & 0x1fff; | 228 | vbl_end = (vbl >> 16) & 0x1fff; | |
229 | 229 | |||
230 | if ((*vpos < vbl_start) || (*vpos > vbl_end)) | 230 | if ((*vpos < vbl_start) || (*vpos > vbl_end)) | |
231 | in_vbl = false; | 231 | in_vbl = false; | |
232 | 232 | |||
233 | /* Inside "upper part" of vblank area? Apply corrective offset: */ | 233 | /* Inside "upper part" of vblank area? Apply corrective offset: */ | |
234 | if (in_vbl && (*vpos >= vbl_start)) | 234 | if (in_vbl && (*vpos >= vbl_start)) | |
235 | *vpos = *vpos - vtotal; | 235 | *vpos = *vpos - vtotal; | |
236 | 236 | |||
237 | /* Readouts valid? */ | 237 | /* Readouts valid? */ | |
238 | if (vbl > 0) | 238 | if (vbl > 0) | |
239 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; | 239 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; | |
240 | 240 | |||
241 | /* In vblank? */ | 241 | /* In vblank? */ | |
242 | if (in_vbl) | 242 | if (in_vbl) | |
243 | ret |= DRM_SCANOUTPOS_INVBL; | 243 | ret |= DRM_SCANOUTPOS_INVBL; | |
244 | 244 | |||
245 | return ret; | 245 | return ret; | |
246 | } | 246 | } | |
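
A quick worked example of the pre-gen4 split above, with hypothetical numbers:

/* With htotal == 1024 pixels per line and a raw pixelcount of 300000:
 *	*vpos = 300000 / 1024        = 292	(scanline)
 *	*hpos = 300000 - 292 * 1024  = 992	(pixel within that line)
 * i.e. *hpos is simply position % htotal, computed without a second
 * division. */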
247 | 247 | |||
248 | static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | 248 | static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | |
249 | int *max_error, | 249 | int *max_error, | |
250 | struct timeval *vblank_time, | 250 | struct timeval *vblank_time, | |
251 | unsigned flags) | 251 | unsigned flags) | |
252 | { | 252 | { | |
253 | struct drm_i915_private *dev_priv = dev->dev_private; | 253 | struct drm_i915_private *dev_priv = dev->dev_private; | |
254 | struct drm_crtc *crtc; | 254 | struct drm_crtc *crtc; | |
255 | 255 | |||
256 | if (pipe < 0 || pipe >= dev_priv->num_pipe) { | 256 | if (pipe < 0 || pipe >= dev_priv->num_pipe) { | |
257 | DRM_ERROR("Invalid crtc %d\n", pipe); | 257 | DRM_ERROR("Invalid crtc %d\n", pipe); | |
258 | return -EINVAL; | 258 | return -EINVAL; | |
259 | } | 259 | } | |
260 | 260 | |||
261 | /* Get drm_crtc to timestamp: */ | 261 | /* Get drm_crtc to timestamp: */ | |
262 | crtc = intel_get_crtc_for_pipe(dev, pipe); | 262 | crtc = intel_get_crtc_for_pipe(dev, pipe); | |
263 | if (crtc == NULL) { | 263 | if (crtc == NULL) { | |
264 | DRM_ERROR("Invalid crtc %d\n", pipe); | 264 | DRM_ERROR("Invalid crtc %d\n", pipe); | |
265 | return -EINVAL; | 265 | return -EINVAL; | |
266 | } | 266 | } | |
267 | 267 | |||
268 | if (!crtc->enabled) { | 268 | if (!crtc->enabled) { | |
269 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | 269 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | |
270 | return -EBUSY; | 270 | return -EBUSY; | |
271 | } | 271 | } | |
272 | 272 | |||
273 | /* Helper routine in DRM core does all the work: */ | 273 | /* Helper routine in DRM core does all the work: */ | |
274 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, | 274 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, | |
275 | vblank_time, flags, | 275 | vblank_time, flags, | |
276 | crtc); | 276 | crtc); | |
277 | } | 277 | } | |
278 | 278 | |||
279 | /* | 279 | /* | |
280 | * Handle hotplug events outside the interrupt handler proper. | 280 | * Handle hotplug events outside the interrupt handler proper. | |
281 | */ | 281 | */ | |
282 | static void i915_hotplug_work_func(struct work_struct *work) | 282 | static void i915_hotplug_work_func(struct work_struct *work) | |
283 | { | 283 | { | |
284 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 284 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | |
285 | hotplug_work); | 285 | hotplug_work); | |
286 | struct drm_device *dev = dev_priv->dev; | 286 | struct drm_device *dev = dev_priv->dev; | |
287 | struct drm_mode_config *mode_config = &dev->mode_config; | 287 | struct drm_mode_config *mode_config = &dev->mode_config; | |
288 | struct intel_encoder *encoder; | 288 | struct intel_encoder *encoder; | |
289 | 289 | |||
290 | mutex_lock(&mode_config->mutex); | 290 | mutex_lock(&mode_config->mutex); | |
291 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | 291 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | |
292 | 292 | |||
293 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 293 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | |
294 | if (encoder->hot_plug) | 294 | if (encoder->hot_plug) | |
295 | encoder->hot_plug(encoder); | 295 | encoder->hot_plug(encoder); | |
296 | 296 | |||
297 | mutex_unlock(&mode_config->mutex); | 297 | mutex_unlock(&mode_config->mutex); | |
298 | 298 | |||
299 | /* Just fire off a uevent and let userspace tell us what to do */ | 299 | /* Just fire off a uevent and let userspace tell us what to do */ | |
300 | drm_helper_hpd_irq_event(dev); | 300 | drm_helper_hpd_irq_event(dev); | |
301 | } | 301 | } | |
302 | 302 | |||
303 | /* defined in intel_pm.c */ | 303 | /* defined in intel_pm.c */ | |
304 | extern spinlock_t mchdev_lock; | 304 | extern spinlock_t mchdev_lock; | |
305 | 305 | |||
306 | static void ironlake_handle_rps_change(struct drm_device *dev) | 306 | static void ironlake_handle_rps_change(struct drm_device *dev) | |
307 | { | 307 | { | |
308 | drm_i915_private_t *dev_priv = dev->dev_private; | 308 | drm_i915_private_t *dev_priv = dev->dev_private; | |
309 | u32 busy_up, busy_down, max_avg, min_avg; | 309 | u32 busy_up, busy_down, max_avg, min_avg; | |
310 | u8 new_delay; | 310 | u8 new_delay; | |
311 | unsigned long flags; | 311 | unsigned long flags; | |
312 | 312 | |||
313 | spin_lock_irqsave(&mchdev_lock, flags); | 313 | spin_lock_irqsave(&mchdev_lock, flags); | |
314 | 314 | |||
315 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | 315 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | |
316 | 316 | |||
317 | new_delay = dev_priv->ips.cur_delay; | 317 | new_delay = dev_priv->ips.cur_delay; | |
318 | 318 | |||
319 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); | 319 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); | |
320 | busy_up = I915_READ(RCPREVBSYTUPAVG); | 320 | busy_up = I915_READ(RCPREVBSYTUPAVG); | |
321 | busy_down = I915_READ(RCPREVBSYTDNAVG); | 321 | busy_down = I915_READ(RCPREVBSYTDNAVG); | |
322 | max_avg = I915_READ(RCBMAXAVG); | 322 | max_avg = I915_READ(RCBMAXAVG); | |
323 | min_avg = I915_READ(RCBMINAVG); | 323 | min_avg = I915_READ(RCBMINAVG); | |
324 | 324 | |||
325 | /* Handle RCS change request from hw */ | 325 | /* Handle RCS change request from hw */ | |
326 | if (busy_up > max_avg) { | 326 | if (busy_up > max_avg) { | |
327 | if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) | 327 | if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) | |
328 | new_delay = dev_priv->ips.cur_delay - 1; | 328 | new_delay = dev_priv->ips.cur_delay - 1; | |
329 | if (new_delay < dev_priv->ips.max_delay) | 329 | if (new_delay < dev_priv->ips.max_delay) | |
330 | new_delay = dev_priv->ips.max_delay; | 330 | new_delay = dev_priv->ips.max_delay; | |
331 | } else if (busy_down < min_avg) { | 331 | } else if (busy_down < min_avg) { | |
332 | if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) | 332 | if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) | |
333 | new_delay = dev_priv->ips.cur_delay + 1; | 333 | new_delay = dev_priv->ips.cur_delay + 1; | |
334 | if (new_delay > dev_priv->ips.min_delay) | 334 | if (new_delay > dev_priv->ips.min_delay) | |
335 | new_delay = dev_priv->ips.min_delay; | 335 | new_delay = dev_priv->ips.min_delay; | |
336 | } | 336 | } | |
337 | 337 | |||
338 | if (ironlake_set_drps(dev, new_delay)) | 338 | if (ironlake_set_drps(dev, new_delay)) | |
339 | dev_priv->ips.cur_delay = new_delay; | 339 | dev_priv->ips.cur_delay = new_delay; | |
340 | 340 | |||
341 | spin_unlock_irqrestore(&mchdev_lock, flags); | 341 | spin_unlock_irqrestore(&mchdev_lock, flags); | |
342 | 342 | |||
343 | return; | 343 | return; | |
344 | } | 344 | } | |
345 | 345 | |||
346 | static void notify_ring(struct drm_device *dev, | 346 | static void notify_ring(struct drm_device *dev, | |
347 | struct intel_ring_buffer *ring) | 347 | struct intel_ring_buffer *ring) | |
348 | { | 348 | { | |
349 | struct drm_i915_private *dev_priv = dev->dev_private; | 349 | struct drm_i915_private *dev_priv = dev->dev_private; | |
350 | 350 | |||
351 | if (ring->obj == NULL) | 351 | if (ring->obj == NULL) | |
352 | return; | 352 | return; | |
353 | 353 | |||
354 | trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); | 354 | trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); | |
355 | 355 | |||
356 | #ifdef __NetBSD__ | |||
357 | DRM_WAKEUP_ALL(&ring->irq_queue, &drm_global_mutex); | |||
358 | #else | |||
356 | wake_up_all(&ring->irq_queue); | 359 | wake_up_all(&ring->irq_queue); | |
360 | #endif | |||
357 | if (i915_enable_hangcheck) { | 361 | if (i915_enable_hangcheck) { | |
358 | dev_priv->hangcheck_count = 0; | 362 | dev_priv->hangcheck_count = 0; | |
359 | mod_timer(&dev_priv->hangcheck_timer, | 363 | mod_timer(&dev_priv->hangcheck_timer, | |
360 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | 364 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | |
361 | } | 365 | } | |
362 | } | 366 | } | |
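
The #ifdef __NetBSD__ branch above is this commit's point: ring->irq_queue is now a DRM waitqueue (condvar) that must be signalled under drm_global_mutex instead of a Linux wait_queue_head_t. A hedged sketch of the waiter this wakeup pairs with; the DRM_WAIT_UNTIL spelling and argument order are assumptions on our part (the real macros live in drm_wait_netbsd.h), so treat them as illustrative:

/* Waiter side, e.g. in a seqno wait: re-checks its condition on each
 * wakeup with the interlock held, exactly as Linux's
 * wait_event_interruptible() does against wake_up_all(). */
#ifdef __NetBSD__
	/* Assumed macro shape: (result, queue, interlock, condition). */
	DRM_WAIT_UNTIL(ret, &ring->irq_queue, &drm_global_mutex,
	    i915_seqno_passed(ring->get_seqno(ring, false), seqno));
#else
	ret = wait_event_interruptible(ring->irq_queue,
	    i915_seqno_passed(ring->get_seqno(ring, false), seqno));
#endif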
363 | 367 | |||
364 | static void gen6_pm_rps_work(struct work_struct *work) | 368 | static void gen6_pm_rps_work(struct work_struct *work) | |
365 | { | 369 | { | |
366 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 370 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | |
367 | rps.work); | 371 | rps.work); | |
368 | u32 pm_iir, pm_imr; | 372 | u32 pm_iir, pm_imr; | |
369 | u8 new_delay; | 373 | u8 new_delay; | |
370 | 374 | |||
371 | spin_lock_irq(&dev_priv->rps.lock); | 375 | spin_lock_irq(&dev_priv->rps.lock); | |
372 | pm_iir = dev_priv->rps.pm_iir; | 376 | pm_iir = dev_priv->rps.pm_iir; | |
373 | dev_priv->rps.pm_iir = 0; | 377 | dev_priv->rps.pm_iir = 0; | |
374 | pm_imr = I915_READ(GEN6_PMIMR); | 378 | pm_imr = I915_READ(GEN6_PMIMR); | |
375 | I915_WRITE(GEN6_PMIMR, 0); | 379 | I915_WRITE(GEN6_PMIMR, 0); | |
376 | spin_unlock_irq(&dev_priv->rps.lock); | 380 | spin_unlock_irq(&dev_priv->rps.lock); | |
377 | 381 | |||
378 | if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) | 382 | if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) | |
379 | return; | 383 | return; | |
380 | 384 | |||
381 | mutex_lock(&dev_priv->rps.hw_lock); | 385 | mutex_lock(&dev_priv->rps.hw_lock); | |
382 | 386 | |||
383 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) | 387 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) | |
384 | new_delay = dev_priv->rps.cur_delay + 1; | 388 | new_delay = dev_priv->rps.cur_delay + 1; | |
385 | else | 389 | else | |
386 | new_delay = dev_priv->rps.cur_delay - 1; | 390 | new_delay = dev_priv->rps.cur_delay - 1; | |
387 | 391 | |||
388 | /* sysfs frequency interfaces may have snuck in while servicing the | 392 | /* sysfs frequency interfaces may have snuck in while servicing the | |
389 | * interrupt. | 393 | * interrupt. | |
390 | */ | 394 | */ | |
391 | if (!(new_delay > dev_priv->rps.max_delay || | 395 | if (!(new_delay > dev_priv->rps.max_delay || | |
392 | new_delay < dev_priv->rps.min_delay)) { | 396 | new_delay < dev_priv->rps.min_delay)) { | |
393 | gen6_set_rps(dev_priv->dev, new_delay); | 397 | gen6_set_rps(dev_priv->dev, new_delay); | |
394 | } | 398 | } | |
395 | 399 | |||
396 | mutex_unlock(&dev_priv->rps.hw_lock); | 400 | mutex_unlock(&dev_priv->rps.hw_lock); | |
397 | } | 401 | } | |
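
This work item is the consumer half of a snapshot-and-clear handoff; the producer runs in the chip-specific IRQ handlers later in this file. A hedged sketch of that producer (the helper name is ours, though it closely mirrors the driver's gen6 path):

static void queue_rps_work(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;	/* work item snapshots and clears */
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); /* mask until handled */
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}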
398 | 402 | |||
399 | 403 | |||
400 | /** | 404 | /** | |
401 | * ivybridge_parity_work - Workqueue called when a parity error interrupt | 405 | * ivybridge_parity_work - Workqueue called when a parity error interrupt | |
402 | * occurred. | 406 | * occurred. | |
403 | * @work: workqueue struct | 407 | * @work: workqueue struct | |
404 | * | 408 | * | |
405 | * Doesn't actually do anything except notify userspace. As a consequence of | 409 | * Doesn't actually do anything except notify userspace. As a consequence of | |
406 | * this event, userspace should try to remap the bad rows since statistically | 410 | * this event, userspace should try to remap the bad rows since statistically | |
407 | * the same row is more likely to go bad again. | 411 | * the same row is more likely to go bad again. | |
408 | */ | 412 | */ | |
409 | static void ivybridge_parity_work(struct work_struct *work) | 413 | static void ivybridge_parity_work(struct work_struct *work) | |
410 | { | 414 | { | |
411 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 415 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | |
412 | l3_parity.error_work); | 416 | l3_parity.error_work); | |
413 | u32 error_status, row, bank, subbank; | 417 | u32 error_status, row, bank, subbank; | |
414 | char *parity_event[5]; | 418 | char *parity_event[5]; | |
415 | uint32_t misccpctl; | 419 | uint32_t misccpctl; | |
416 | unsigned long flags; | 420 | unsigned long flags; | |
417 | 421 | |||
418 | /* We must turn off DOP level clock gating to access the L3 registers. | 422 | /* We must turn off DOP level clock gating to access the L3 registers. | |
419 | * In order to prevent a get/put style interface, acquire struct mutex | 423 | * In order to prevent a get/put style interface, acquire struct mutex | |
420 | * any time we access those registers. | 424 | * any time we access those registers. | |
421 | */ | 425 | */ | |
422 | mutex_lock(&dev_priv->dev->struct_mutex); | 426 | mutex_lock(&dev_priv->dev->struct_mutex); | |
423 | 427 | |||
424 | misccpctl = I915_READ(GEN7_MISCCPCTL); | 428 | misccpctl = I915_READ(GEN7_MISCCPCTL); | |
425 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | 429 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | |
426 | POSTING_READ(GEN7_MISCCPCTL); | 430 | POSTING_READ(GEN7_MISCCPCTL); | |
427 | 431 | |||
428 | error_status = I915_READ(GEN7_L3CDERRST1); | 432 | error_status = I915_READ(GEN7_L3CDERRST1); | |
429 | row = GEN7_PARITY_ERROR_ROW(error_status); | 433 | row = GEN7_PARITY_ERROR_ROW(error_status); | |
430 | bank = GEN7_PARITY_ERROR_BANK(error_status); | 434 | bank = GEN7_PARITY_ERROR_BANK(error_status); | |
431 | subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); | 435 | subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); | |
432 | 436 | |||
433 | I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | | 437 | I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | | |
434 | GEN7_L3CDERRST1_ENABLE); | 438 | GEN7_L3CDERRST1_ENABLE); | |
435 | POSTING_READ(GEN7_L3CDERRST1); | 439 | POSTING_READ(GEN7_L3CDERRST1); | |
436 | 440 | |||
437 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | 441 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | |
438 | 442 | |||
439 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 443 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
440 | dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | 444 | dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | |
441 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 445 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
442 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 446 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
443 | 447 | |||
444 | mutex_unlock(&dev_priv->dev->struct_mutex); | 448 | mutex_unlock(&dev_priv->dev->struct_mutex); | |
445 | 449 | |||
446 | parity_event[0] = "L3_PARITY_ERROR=1"; | 450 | parity_event[0] = "L3_PARITY_ERROR=1"; | |
447 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); | 451 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); | |
448 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); | 452 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); | |
449 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); | 453 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); | |
450 | parity_event[4] = NULL; | 454 | parity_event[4] = NULL; | |
451 | 455 | |||
452 | kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, | 456 | kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, | |
453 | KOBJ_CHANGE, parity_event); | 457 | KOBJ_CHANGE, parity_event); | |
454 | 458 | |||
455 | DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", | 459 | DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", | |
456 | row, bank, subbank); | 460 | row, bank, subbank); | |
457 | 461 | |||
458 | kfree(parity_event[3]); | 462 | kfree(parity_event[3]); | |
459 | kfree(parity_event[2]); | 463 | kfree(parity_event[2]); | |
460 | kfree(parity_event[1]); | 464 | kfree(parity_event[1]); | |
461 | } | 465 | } | |
462 | 466 | |||
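The parity_event[] construction above is the usual NULL-terminated key=value vector for kobject_uevent_env(): one string literal plus three kasprintf()-allocated entries, freed in reverse after delivery (index 0 is a literal, which is why it is never kfree()d). A userland sketch of the same shape, with asprintf() standing in for kasprintf(); kv() and the sample values are invented:

#define _GNU_SOURCE             /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

/* kv() plays the role of kasprintf(): format "KEY=val" into a
 * freshly allocated string, or return NULL on failure. */
static char *
kv(const char *key, unsigned val)
{
        char *s;

        if (asprintf(&s, "%s=%u", key, val) < 0)
                return NULL;
        return s;
}

int
main(void)
{
        char *ev[5];
        int i;

        ev[0] = "L3_PARITY_ERROR=1";    /* literal: never freed */
        ev[1] = kv("ROW", 3);
        ev[2] = kv("BANK", 1);
        ev[3] = kv("SUBBANK", 0);
        ev[4] = NULL;                   /* terminator, like parity_event[4] */

        /* deliver ev here, then free in reverse as the driver does */
        for (i = 3; i >= 1; i--)
                free(ev[i]);
        return 0;
}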
463 | static void ivybridge_handle_parity_error(struct drm_device *dev) | 467 | static void ivybridge_handle_parity_error(struct drm_device *dev) | |
464 | { | 468 | { | |
465 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 469 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
466 | unsigned long flags; | 470 | unsigned long flags; | |
467 | 471 | |||
468 | if (!HAS_L3_GPU_CACHE(dev)) | 472 | if (!HAS_L3_GPU_CACHE(dev)) | |
469 | return; | 473 | return; | |
470 | 474 | |||
471 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 475 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
472 | dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | 476 | dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | |
473 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 477 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
474 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 478 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
475 | 479 | |||
476 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); | 480 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); | |
477 | } | 481 | } | |
478 | 482 | |||
479 | static void snb_gt_irq_handler(struct drm_device *dev, | 483 | static void snb_gt_irq_handler(struct drm_device *dev, | |
480 | struct drm_i915_private *dev_priv, | 484 | struct drm_i915_private *dev_priv, | |
481 | u32 gt_iir) | 485 | u32 gt_iir) | |
482 | { | 486 | { | |
483 | 487 | |||
484 | if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | | 488 | if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | | |
485 | GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) | 489 | GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) | |
486 | notify_ring(dev, &dev_priv->ring[RCS]); | 490 | notify_ring(dev, &dev_priv->ring[RCS]); | |
487 | if (gt_iir & GEN6_BSD_USER_INTERRUPT) | 491 | if (gt_iir & GEN6_BSD_USER_INTERRUPT) | |
488 | notify_ring(dev, &dev_priv->ring[VCS]); | 492 | notify_ring(dev, &dev_priv->ring[VCS]); | |
489 | if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) | 493 | if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) | |
490 | notify_ring(dev, &dev_priv->ring[BCS]); | 494 | notify_ring(dev, &dev_priv->ring[BCS]); | |
491 | 495 | |||
492 | if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | | 496 | if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | | |
493 | GT_GEN6_BSD_CS_ERROR_INTERRUPT | | 497 | GT_GEN6_BSD_CS_ERROR_INTERRUPT | | |
494 | GT_RENDER_CS_ERROR_INTERRUPT)) { | 498 | GT_RENDER_CS_ERROR_INTERRUPT)) { | |
495 | DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); | 499 | DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); | |
496 | i915_handle_error(dev, false); | 500 | i915_handle_error(dev, false); | |
497 | } | 501 | } | |
498 | 502 | |||
499 | if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) | 503 | if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) | |
500 | ivybridge_handle_parity_error(dev); | 504 | ivybridge_handle_parity_error(dev); | |
501 | } | 505 | } | |
502 | 506 | |||
503 | static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, | 507 | static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, | |
504 | u32 pm_iir) | 508 | u32 pm_iir) | |
505 | { | 509 | { | |
506 | unsigned long flags; | 510 | unsigned long flags; | |
507 | 511 | |||
508 | /* | 512 | /* | |
509 | * IIR bits should never already be set because IMR should | 513 | * IIR bits should never already be set because IMR should | |
510 | * prevent an interrupt from being shown in IIR. The warning | 514 | * prevent an interrupt from being shown in IIR. The warning | |
511 | * catches the case where we've unsafely cleared | 515 | * catches the case where we've unsafely cleared | |
512 | * dev_priv->rps.pm_iir. Although missing an interrupt of the same | 516 | * dev_priv->rps.pm_iir. Although missing an interrupt of the same | |
513 | * type is not a problem, it indicates a bug in the logic. | 517 | * type is not a problem, it indicates a bug in the logic. | |
514 | * | 518 | * | |
515 | * The mask bit in IMR is cleared by dev_priv->rps.work. | 519 | * The mask bit in IMR is cleared by dev_priv->rps.work. | |
516 | */ | 520 | */ | |
517 | 521 | |||
518 | spin_lock_irqsave(&dev_priv->rps.lock, flags); | 522 | spin_lock_irqsave(&dev_priv->rps.lock, flags); | |
519 | dev_priv->rps.pm_iir |= pm_iir; | 523 | dev_priv->rps.pm_iir |= pm_iir; | |
520 | I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); | 524 | I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); | |
521 | POSTING_READ(GEN6_PMIMR); | 525 | POSTING_READ(GEN6_PMIMR); | |
522 | spin_unlock_irqrestore(&dev_priv->rps.lock, flags); | 526 | spin_unlock_irqrestore(&dev_priv->rps.lock, flags); | |
523 | 527 | |||
524 | queue_work(dev_priv->wq, &dev_priv->rps.work); | 528 | queue_work(dev_priv->wq, &dev_priv->rps.work); | |
525 | } | 529 | } | |
526 | 530 | |||
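The comment above states an invariant worth spelling out: a PM IIR bit can only latch while its GEN6_PMIMR mask bit is clear, this function sets the mask as it accumulates bits, and the work handler is the only path that unmasks. A minimal single-threaded model of that handshake (real MMIO and locking elided; every name here is invented):

#include <assert.h>
#include <stdint.h>

static uint32_t pending;        /* models dev_priv->rps.pm_iir */
static uint32_t pm_imr_reg;     /* models GEN6_PMIMR */

static void
irq_side(uint32_t pm_iir)
{
        assert((pending & pm_iir) == 0);        /* the stated invariant */
        pending |= pm_iir;
        pm_imr_reg = pending;   /* mask: no re-latch until drained */
}

static uint32_t
work_side(void)
{
        uint32_t iir = pending;

        pending = 0;
        pm_imr_reg = 0;         /* unmask, as gen6_pm_rps_work() does */
        return iir;
}

int
main(void)
{
        irq_side(0x10);
        return work_side() == 0x10 ? 0 : 1;
}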
527 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) | 531 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |
528 | { | 532 | { | |
529 | struct drm_device *dev = (struct drm_device *) arg; | 533 | struct drm_device *dev = (struct drm_device *) arg; | |
530 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 534 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
531 | u32 iir, gt_iir, pm_iir; | 535 | u32 iir, gt_iir, pm_iir; | |
532 | irqreturn_t ret = IRQ_NONE; | 536 | irqreturn_t ret = IRQ_NONE; | |
533 | unsigned long irqflags; | 537 | unsigned long irqflags; | |
534 | int pipe; | 538 | int pipe; | |
535 | u32 pipe_stats[I915_MAX_PIPES]; | 539 | u32 pipe_stats[I915_MAX_PIPES]; | |
536 | bool blc_event; | 540 | bool blc_event; | |
537 | 541 | |||
538 | atomic_inc(&dev_priv->irq_received); | 542 | atomic_inc(&dev_priv->irq_received); | |
539 | 543 | |||
540 | while (true) { | 544 | while (true) { | |
541 | iir = I915_READ(VLV_IIR); | 545 | iir = I915_READ(VLV_IIR); | |
542 | gt_iir = I915_READ(GTIIR); | 546 | gt_iir = I915_READ(GTIIR); | |
543 | pm_iir = I915_READ(GEN6_PMIIR); | 547 | pm_iir = I915_READ(GEN6_PMIIR); | |
544 | 548 | |||
545 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) | 549 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) | |
546 | goto out; | 550 | goto out; | |
547 | 551 | |||
548 | ret = IRQ_HANDLED; | 552 | ret = IRQ_HANDLED; | |
549 | 553 | |||
550 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | 554 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
551 | 555 | |||
552 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 556 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
553 | for_each_pipe(pipe) { | 557 | for_each_pipe(pipe) { | |
554 | int reg = PIPESTAT(pipe); | 558 | int reg = PIPESTAT(pipe); | |
555 | pipe_stats[pipe] = I915_READ(reg); | 559 | pipe_stats[pipe] = I915_READ(reg); | |
556 | 560 | |||
557 | /* | 561 | /* | |
558 | * Clear the PIPE*STAT regs before the IIR | 562 | * Clear the PIPE*STAT regs before the IIR | |
559 | */ | 563 | */ | |
560 | if (pipe_stats[pipe] & 0x8000ffff) { | 564 | if (pipe_stats[pipe] & 0x8000ffff) { | |
561 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | 565 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
562 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | 566 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
563 | pipe_name(pipe)); | 567 | pipe_name(pipe)); | |
564 | I915_WRITE(reg, pipe_stats[pipe]); | 568 | I915_WRITE(reg, pipe_stats[pipe]); | |
565 | } | 569 | } | |
566 | } | 570 | } | |
567 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 571 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
568 | 572 | |||
569 | for_each_pipe(pipe) { | 573 | for_each_pipe(pipe) { | |
570 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) | 574 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) | |
571 | drm_handle_vblank(dev, pipe); | 575 | drm_handle_vblank(dev, pipe); | |
572 | 576 | |||
573 | if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { | 577 | if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { | |
574 | intel_prepare_page_flip(dev, pipe); | 578 | intel_prepare_page_flip(dev, pipe); | |
575 | intel_finish_page_flip(dev, pipe); | 579 | intel_finish_page_flip(dev, pipe); | |
576 | } | 580 | } | |
577 | } | 581 | } | |
578 | 582 | |||
579 | /* Consume port. Then clear IIR or we'll miss events */ | 583 | /* Consume port. Then clear IIR or we'll miss events */ | |
580 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { | 584 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { | |
581 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 585 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
582 | 586 | |||
583 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | 587 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
584 | hotplug_status); | 588 | hotplug_status); | |
585 | if (hotplug_status & dev_priv->hotplug_supported_mask) | 589 | if (hotplug_status & dev_priv->hotplug_supported_mask) | |
586 | queue_work(dev_priv->wq, | 590 | queue_work(dev_priv->wq, | |
587 | &dev_priv->hotplug_work); | 591 | &dev_priv->hotplug_work); | |
588 | 592 | |||
589 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 593 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | |
590 | I915_READ(PORT_HOTPLUG_STAT); | 594 | I915_READ(PORT_HOTPLUG_STAT); | |
591 | } | 595 | } | |
592 | 596 | |||
593 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | 597 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
594 | blc_event = true; | 598 | blc_event = true; | |
595 | 599 | |||
596 | if (pm_iir & GEN6_PM_DEFERRED_EVENTS) | 600 | if (pm_iir & GEN6_PM_DEFERRED_EVENTS) | |
597 | gen6_queue_rps_work(dev_priv, pm_iir); | 601 | gen6_queue_rps_work(dev_priv, pm_iir); | |
598 | 602 | |||
599 | I915_WRITE(GTIIR, gt_iir); | 603 | I915_WRITE(GTIIR, gt_iir); | |
600 | I915_WRITE(GEN6_PMIIR, pm_iir); | 604 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
601 | I915_WRITE(VLV_IIR, iir); | 605 | I915_WRITE(VLV_IIR, iir); | |
602 | } | 606 | } | |
603 | 607 | |||
604 | out: | 608 | out: | |
605 | return ret; | 609 | return ret; | |
606 | } | 610 | } | |
607 | 611 | |||
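The loop above keeps a strict order: snapshot the identity registers, stop once all three read zero, service the snapshot, and only then write the snapshot back. These IIR registers clear on writing ones, so acking before servicing would silently drop anything that latched in between. The shape of it, with the MMIO stubbed out (fake_iir and the helpers are invented):

#include <stdint.h>

static uint32_t fake_iir;       /* stand-in for I915_READ(VLV_IIR) etc. */

static uint32_t read_iir(void)          { return fake_iir; }
static void write_iir(uint32_t v)       { fake_iir &= ~v; }     /* W1C */
static void service(uint32_t v)         { (void)v; /* handle events */ }

static void
irq_loop(void)
{
        for (;;) {
                uint32_t iir = read_iir();

                if (iir == 0)
                        break;          /* nothing latched: done */
                service(iir);           /* consume sources first... */
                write_iir(iir);         /* ...then ack only what was seen */
        }
}

int
main(void)
{
        fake_iir = 0x3;
        irq_loop();
        return fake_iir;                /* 0 once everything is acked */
}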
608 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) | 612 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) | |
609 | { | 613 | { | |
610 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 614 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
611 | int pipe; | 615 | int pipe; | |
612 | 616 | |||
613 | if (pch_iir & SDE_HOTPLUG_MASK) | 617 | if (pch_iir & SDE_HOTPLUG_MASK) | |
614 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 618 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | |
615 | 619 | |||
616 | if (pch_iir & SDE_AUDIO_POWER_MASK) | 620 | if (pch_iir & SDE_AUDIO_POWER_MASK) | |
617 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | 621 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | |
618 | (pch_iir & SDE_AUDIO_POWER_MASK) >> | 622 | (pch_iir & SDE_AUDIO_POWER_MASK) >> | |
619 | SDE_AUDIO_POWER_SHIFT); | 623 | SDE_AUDIO_POWER_SHIFT); | |
620 | 624 | |||
621 | if (pch_iir & SDE_GMBUS) | 625 | if (pch_iir & SDE_GMBUS) | |
622 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | 626 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | |
623 | 627 | |||
624 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | 628 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | |
625 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | 629 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | |
626 | 630 | |||
627 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | 631 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | |
628 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | 632 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | |
629 | 633 | |||
630 | if (pch_iir & SDE_POISON) | 634 | if (pch_iir & SDE_POISON) | |
631 | DRM_ERROR("PCH poison interrupt\n"); | 635 | DRM_ERROR("PCH poison interrupt\n"); | |
632 | 636 | |||
633 | if (pch_iir & SDE_FDI_MASK) | 637 | if (pch_iir & SDE_FDI_MASK) | |
634 | for_each_pipe(pipe) | 638 | for_each_pipe(pipe) | |
635 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | 639 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | |
636 | pipe_name(pipe), | 640 | pipe_name(pipe), | |
637 | I915_READ(FDI_RX_IIR(pipe))); | 641 | I915_READ(FDI_RX_IIR(pipe))); | |
638 | 642 | |||
639 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | 643 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | |
640 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | 644 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | |
641 | 645 | |||
642 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | 646 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | |
643 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | 647 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | |
644 | 648 | |||
645 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | 649 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | |
646 | DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); | 650 | DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); | |
647 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) | 651 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) | |
648 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); | 652 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); | |
649 | } | 653 | } | |
650 | 654 | |||
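The audio-power report above is a plain mask-then-shift decode: isolate the field with the mask, shift it down to get the port number. A self-contained restatement with made-up field placement; the real SDE_AUDIO_POWER_* constants come from i915_reg.h and need not match these:

#include <stdint.h>
#include <stdio.h>

#define AUDIO_POWER_SHIFT       29u
#define AUDIO_POWER_MASK        (7u << AUDIO_POWER_SHIFT)

int
main(void)
{
        uint32_t pch_iir = 2u << AUDIO_POWER_SHIFT;     /* fake latched value */

        if (pch_iir & AUDIO_POWER_MASK)
                printf("PCH audio power change on port %u\n",
                    (unsigned)((pch_iir & AUDIO_POWER_MASK) >>
                        AUDIO_POWER_SHIFT));
        return 0;
}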
651 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | 655 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |
652 | { | 656 | { | |
653 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 657 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
654 | int pipe; | 658 | int pipe; | |
655 | 659 | |||
656 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | 660 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | |
657 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 661 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | |
658 | 662 | |||
659 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) | 663 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) | |
660 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | 664 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | |
661 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | 665 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | |
662 | SDE_AUDIO_POWER_SHIFT_CPT); | 666 | SDE_AUDIO_POWER_SHIFT_CPT); | |
663 | 667 | |||
664 | if (pch_iir & SDE_AUX_MASK_CPT) | 668 | if (pch_iir & SDE_AUX_MASK_CPT) | |
665 | DRM_DEBUG_DRIVER("AUX channel interrupt\n"); | 669 | DRM_DEBUG_DRIVER("AUX channel interrupt\n"); | |
666 | 670 | |||
667 | if (pch_iir & SDE_GMBUS_CPT) | 671 | if (pch_iir & SDE_GMBUS_CPT) | |
668 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | 672 | DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); | |
669 | 673 | |||
670 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) | 674 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) | |
671 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); | 675 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); | |
672 | 676 | |||
673 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) | 677 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) | |
674 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); | 678 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); | |
675 | 679 | |||
676 | if (pch_iir & SDE_FDI_MASK_CPT) | 680 | if (pch_iir & SDE_FDI_MASK_CPT) | |
677 | for_each_pipe(pipe) | 681 | for_each_pipe(pipe) | |
678 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | 682 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | |
679 | pipe_name(pipe), | 683 | pipe_name(pipe), | |
680 | I915_READ(FDI_RX_IIR(pipe))); | 684 | I915_READ(FDI_RX_IIR(pipe))); | |
681 | } | 685 | } | |
682 | 686 | |||
683 | static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | 687 | static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | |
684 | { | 688 | { | |
685 | struct drm_device *dev = (struct drm_device *) arg; | 689 | struct drm_device *dev = (struct drm_device *) arg; | |
686 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 690 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
687 | u32 de_iir, gt_iir, de_ier, pm_iir; | 691 | u32 de_iir, gt_iir, de_ier, pm_iir; | |
688 | irqreturn_t ret = IRQ_NONE; | 692 | irqreturn_t ret = IRQ_NONE; | |
689 | int i; | 693 | int i; | |
690 | 694 | |||
691 | atomic_inc(&dev_priv->irq_received); | 695 | atomic_inc(&dev_priv->irq_received); | |
692 | 696 | |||
693 | /* disable master interrupt before clearing iir */ | 697 | /* disable master interrupt before clearing iir */ | |
694 | de_ier = I915_READ(DEIER); | 698 | de_ier = I915_READ(DEIER); | |
695 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | 699 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
696 | 700 | |||
697 | gt_iir = I915_READ(GTIIR); | 701 | gt_iir = I915_READ(GTIIR); | |
698 | if (gt_iir) { | 702 | if (gt_iir) { | |
699 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | 703 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
700 | I915_WRITE(GTIIR, gt_iir); | 704 | I915_WRITE(GTIIR, gt_iir); | |
701 | ret = IRQ_HANDLED; | 705 | ret = IRQ_HANDLED; | |
702 | } | 706 | } | |
703 | 707 | |||
704 | de_iir = I915_READ(DEIIR); | 708 | de_iir = I915_READ(DEIIR); | |
705 | if (de_iir) { | 709 | if (de_iir) { | |
706 | if (de_iir & DE_GSE_IVB) | 710 | if (de_iir & DE_GSE_IVB) | |
707 | intel_opregion_gse_intr(dev); | 711 | intel_opregion_gse_intr(dev); | |
708 | 712 | |||
709 | for (i = 0; i < 3; i++) { | 713 | for (i = 0; i < 3; i++) { | |
710 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) | 714 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) | |
711 | drm_handle_vblank(dev, i); | 715 | drm_handle_vblank(dev, i); | |
712 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { | 716 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { | |
713 | intel_prepare_page_flip(dev, i); | 717 | intel_prepare_page_flip(dev, i); | |
714 | intel_finish_page_flip_plane(dev, i); | 718 | intel_finish_page_flip_plane(dev, i); | |
715 | } | 719 | } | |
716 | } | 720 | } | |
717 | 721 | |||
718 | /* check event from PCH */ | 722 | /* check event from PCH */ | |
719 | if (de_iir & DE_PCH_EVENT_IVB) { | 723 | if (de_iir & DE_PCH_EVENT_IVB) { | |
720 | u32 pch_iir = I915_READ(SDEIIR); | 724 | u32 pch_iir = I915_READ(SDEIIR); | |
721 | 725 | |||
722 | cpt_irq_handler(dev, pch_iir); | 726 | cpt_irq_handler(dev, pch_iir); | |
723 | 727 | |||
724 | /* clear PCH hotplug event before clearing CPU irq */ | 728 | /* clear PCH hotplug event before clearing CPU irq */ | |
725 | I915_WRITE(SDEIIR, pch_iir); | 729 | I915_WRITE(SDEIIR, pch_iir); | |
726 | } | 730 | } | |
727 | 731 | |||
728 | I915_WRITE(DEIIR, de_iir); | 732 | I915_WRITE(DEIIR, de_iir); | |
729 | ret = IRQ_HANDLED; | 733 | ret = IRQ_HANDLED; | |
730 | } | 734 | } | |
731 | 735 | |||
732 | pm_iir = I915_READ(GEN6_PMIIR); | 736 | pm_iir = I915_READ(GEN6_PMIIR); | |
733 | if (pm_iir) { | 737 | if (pm_iir) { | |
734 | if (pm_iir & GEN6_PM_DEFERRED_EVENTS) | 738 | if (pm_iir & GEN6_PM_DEFERRED_EVENTS) | |
735 | gen6_queue_rps_work(dev_priv, pm_iir); | 739 | gen6_queue_rps_work(dev_priv, pm_iir); | |
736 | I915_WRITE(GEN6_PMIIR, pm_iir); | 740 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
737 | ret = IRQ_HANDLED; | 741 | ret = IRQ_HANDLED; | |
738 | } | 742 | } | |
739 | 743 | |||
740 | I915_WRITE(DEIER, de_ier); | 744 | I915_WRITE(DEIER, de_ier); | |
741 | POSTING_READ(DEIER); | 745 | POSTING_READ(DEIER); | |
742 | 746 | |||
743 | return ret; | 747 | return ret; | |
744 | } | 748 | } | |
745 | 749 | |||
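Both this handler and ironlake's below bracket their work identically: save DEIER, clear only the master-enable bit while servicing, then restore the saved value rather than blindly setting the bit again, so anything somebody else had masked stays masked. In miniature, with plain variables in place of MMIO (all names invented):

#include <stdint.h>

#define MASTER_IRQ_CONTROL      (1u << 31)

static uint32_t deier = MASTER_IRQ_CONTROL | 0x7;       /* fake DEIER */

static void
handler(void)
{
        uint32_t saved = deier;

        deier = saved & ~MASTER_IRQ_CONTROL;    /* gate new CPU interrupts */
        /* ... read and clear GTIIR/DEIIR/PMIIR here ... */
        deier = saved;                          /* restore, don't just set */
}

int
main(void)
{
        handler();
        return deier == (MASTER_IRQ_CONTROL | 0x7) ? 0 : 1;
}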
746 | static void ilk_gt_irq_handler(struct drm_device *dev, | 750 | static void ilk_gt_irq_handler(struct drm_device *dev, | |
747 | struct drm_i915_private *dev_priv, | 751 | struct drm_i915_private *dev_priv, | |
748 | u32 gt_iir) | 752 | u32 gt_iir) | |
749 | { | 753 | { | |
750 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) | 754 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) | |
751 | notify_ring(dev, &dev_priv->ring[RCS]); | 755 | notify_ring(dev, &dev_priv->ring[RCS]); | |
752 | if (gt_iir & GT_BSD_USER_INTERRUPT) | 756 | if (gt_iir & GT_BSD_USER_INTERRUPT) | |
753 | notify_ring(dev, &dev_priv->ring[VCS]); | 757 | notify_ring(dev, &dev_priv->ring[VCS]); | |
754 | } | 758 | } | |
755 | 759 | |||
756 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) | 760 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) | |
757 | { | 761 | { | |
758 | struct drm_device *dev = (struct drm_device *) arg; | 762 | struct drm_device *dev = (struct drm_device *) arg; | |
759 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 763 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
760 | int ret = IRQ_NONE; | 764 | int ret = IRQ_NONE; | |
761 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; | 765 | u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; | |
762 | 766 | |||
763 | atomic_inc(&dev_priv->irq_received); | 767 | atomic_inc(&dev_priv->irq_received); | |
764 | 768 | |||
765 | /* disable master interrupt before clearing iir */ | 769 | /* disable master interrupt before clearing iir */ | |
766 | de_ier = I915_READ(DEIER); | 770 | de_ier = I915_READ(DEIER); | |
767 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | 771 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
768 | POSTING_READ(DEIER); | 772 | POSTING_READ(DEIER); | |
769 | 773 | |||
770 | de_iir = I915_READ(DEIIR); | 774 | de_iir = I915_READ(DEIIR); | |
771 | gt_iir = I915_READ(GTIIR); | 775 | gt_iir = I915_READ(GTIIR); | |
772 | pch_iir = I915_READ(SDEIIR); | 776 | pch_iir = I915_READ(SDEIIR); | |
773 | pm_iir = I915_READ(GEN6_PMIIR); | 777 | pm_iir = I915_READ(GEN6_PMIIR); | |
774 | 778 | |||
775 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && | 779 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && | |
776 | (!IS_GEN6(dev) || pm_iir == 0)) | 780 | (!IS_GEN6(dev) || pm_iir == 0)) | |
777 | goto done; | 781 | goto done; | |
778 | 782 | |||
779 | ret = IRQ_HANDLED; | 783 | ret = IRQ_HANDLED; | |
780 | 784 | |||
781 | if (IS_GEN5(dev)) | 785 | if (IS_GEN5(dev)) | |
782 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | 786 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | |
783 | else | 787 | else | |
784 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | 788 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
785 | 789 | |||
786 | if (de_iir & DE_GSE) | 790 | if (de_iir & DE_GSE) | |
787 | intel_opregion_gse_intr(dev); | 791 | intel_opregion_gse_intr(dev); | |
788 | 792 | |||
789 | if (de_iir & DE_PIPEA_VBLANK) | 793 | if (de_iir & DE_PIPEA_VBLANK) | |
790 | drm_handle_vblank(dev, 0); | 794 | drm_handle_vblank(dev, 0); | |
791 | 795 | |||
792 | if (de_iir & DE_PIPEB_VBLANK) | 796 | if (de_iir & DE_PIPEB_VBLANK) | |
793 | drm_handle_vblank(dev, 1); | 797 | drm_handle_vblank(dev, 1); | |
794 | 798 | |||
795 | if (de_iir & DE_PLANEA_FLIP_DONE) { | 799 | if (de_iir & DE_PLANEA_FLIP_DONE) { | |
796 | intel_prepare_page_flip(dev, 0); | 800 | intel_prepare_page_flip(dev, 0); | |
797 | intel_finish_page_flip_plane(dev, 0); | 801 | intel_finish_page_flip_plane(dev, 0); | |
798 | } | 802 | } | |
799 | 803 | |||
800 | if (de_iir & DE_PLANEB_FLIP_DONE) { | 804 | if (de_iir & DE_PLANEB_FLIP_DONE) { | |
801 | intel_prepare_page_flip(dev, 1); | 805 | intel_prepare_page_flip(dev, 1); | |
802 | intel_finish_page_flip_plane(dev, 1); | 806 | intel_finish_page_flip_plane(dev, 1); | |
803 | } | 807 | } | |
804 | 808 | |||
805 | /* check event from PCH */ | 809 | /* check event from PCH */ | |
806 | if (de_iir & DE_PCH_EVENT) { | 810 | if (de_iir & DE_PCH_EVENT) { | |
807 | if (HAS_PCH_CPT(dev)) | 811 | if (HAS_PCH_CPT(dev)) | |
808 | cpt_irq_handler(dev, pch_iir); | 812 | cpt_irq_handler(dev, pch_iir); | |
809 | else | 813 | else | |
810 | ibx_irq_handler(dev, pch_iir); | 814 | ibx_irq_handler(dev, pch_iir); | |
811 | } | 815 | } | |
812 | 816 | |||
813 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) | 817 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) | |
814 | ironlake_handle_rps_change(dev); | 818 | ironlake_handle_rps_change(dev); | |
815 | 819 | |||
816 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) | 820 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) | |
817 | gen6_queue_rps_work(dev_priv, pm_iir); | 821 | gen6_queue_rps_work(dev_priv, pm_iir); | |
818 | 822 | |||
819 | /* should clear PCH hotplug event before clearing CPU irq */ | 823 | /* should clear PCH hotplug event before clearing CPU irq */ | |
820 | I915_WRITE(SDEIIR, pch_iir); | 824 | I915_WRITE(SDEIIR, pch_iir); | |
821 | I915_WRITE(GTIIR, gt_iir); | 825 | I915_WRITE(GTIIR, gt_iir); | |
822 | I915_WRITE(DEIIR, de_iir); | 826 | I915_WRITE(DEIIR, de_iir); | |
823 | I915_WRITE(GEN6_PMIIR, pm_iir); | 827 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
824 | 828 | |||
825 | done: | 829 | done: | |
826 | I915_WRITE(DEIER, de_ier); | 830 | I915_WRITE(DEIER, de_ier); | |
827 | POSTING_READ(DEIER); | 831 | POSTING_READ(DEIER); | |
828 | 832 | |||
829 | return ret; | 833 | return ret; | |
830 | } | 834 | } | |
831 | 835 | |||
832 | /** | 836 | /** | |
833 | * i915_error_work_func - do process context error handling work | 837 | * i915_error_work_func - do process context error handling work | |
834 | * @work: work struct | 838 | * @work: work struct | |
835 | * | 839 | * | |
836 | * Fire an error uevent so userspace can see that a hang or error | 840 | * Fire an error uevent so userspace can see that a hang or error | |
837 | * was detected. | 841 | * was detected. | |
838 | */ | 842 | */ | |
839 | static void i915_error_work_func(struct work_struct *work) | 843 | static void i915_error_work_func(struct work_struct *work) | |
840 | { | 844 | { | |
841 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 845 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | |
842 | error_work); | 846 | error_work); | |
843 | struct drm_device *dev = dev_priv->dev; | 847 | struct drm_device *dev = dev_priv->dev; | |
844 | char *error_event[] = { "ERROR=1", NULL }; | 848 | char *error_event[] = { "ERROR=1", NULL }; | |
845 | char *reset_event[] = { "RESET=1", NULL }; | 849 | char *reset_event[] = { "RESET=1", NULL }; | |
846 | char *reset_done_event[] = { "ERROR=0", NULL }; | 850 | char *reset_done_event[] = { "ERROR=0", NULL }; | |
847 | 851 | |||
848 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | 852 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | |
849 | 853 | |||
850 | if (atomic_read(&dev_priv->mm.wedged)) { | 854 | if (atomic_read(&dev_priv->mm.wedged)) { | |
851 | DRM_DEBUG_DRIVER("resetting chip\n"); | 855 | DRM_DEBUG_DRIVER("resetting chip\n"); | |
852 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); | 856 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); | |
853 | if (!i915_reset(dev)) { | 857 | if (!i915_reset(dev)) { | |
854 | atomic_set(&dev_priv->mm.wedged, 0); | 858 | atomic_set(&dev_priv->mm.wedged, 0); | |
855 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); | 859 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); | |
856 | } | 860 | } | |
857 | complete_all(&dev_priv->error_completion); | 861 | complete_all(&dev_priv->error_completion); | |
858 | } | 862 | } | |
859 | } | 863 | } | |
860 | 864 | |||
861 | /* NB: instdone is zeroed up front; not every gen fills every slot */ | 865 | /* NB: instdone is zeroed up front; not every gen fills every slot */ | |
862 | static void i915_get_extra_instdone(struct drm_device *dev, | 866 | static void i915_get_extra_instdone(struct drm_device *dev, | |
863 | uint32_t *instdone) | 867 | uint32_t *instdone) | |
864 | { | 868 | { | |
865 | struct drm_i915_private *dev_priv = dev->dev_private; | 869 | struct drm_i915_private *dev_priv = dev->dev_private; | |
866 | memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); | 870 | memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); | |
867 | 871 | |||
868 | switch (INTEL_INFO(dev)->gen) { | 872 | switch (INTEL_INFO(dev)->gen) { | |
869 | case 2: | 873 | case 2: | |
870 | case 3: | 874 | case 3: | |
871 | instdone[0] = I915_READ(INSTDONE); | 875 | instdone[0] = I915_READ(INSTDONE); | |
872 | break; | 876 | break; | |
873 | case 4: | 877 | case 4: | |
874 | case 5: | 878 | case 5: | |
875 | case 6: | 879 | case 6: | |
876 | instdone[0] = I915_READ(INSTDONE_I965); | 880 | instdone[0] = I915_READ(INSTDONE_I965); | |
877 | instdone[1] = I915_READ(INSTDONE1); | 881 | instdone[1] = I915_READ(INSTDONE1); | |
878 | break; | 882 | break; | |
879 | default: | 883 | default: | |
880 | WARN_ONCE(1, "Unsupported platform\n"); | 884 | WARN_ONCE(1, "Unsupported platform\n"); | |
881 | case 7: | 885 | case 7: | |
882 | instdone[0] = I915_READ(GEN7_INSTDONE_1); | 886 | instdone[0] = I915_READ(GEN7_INSTDONE_1); | |
883 | instdone[1] = I915_READ(GEN7_SC_INSTDONE); | 887 | instdone[1] = I915_READ(GEN7_SC_INSTDONE); | |
884 | instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); | 888 | instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); | |
885 | instdone[3] = I915_READ(GEN7_ROW_INSTDONE); | 889 | instdone[3] = I915_READ(GEN7_ROW_INSTDONE); | |
886 | break; | 890 | break; | |
887 | } | 891 | } | |
888 | } | 892 | } | |
889 | 893 | |||
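Note the deliberate fallthrough above: an unrecognized gen lands in default:, warns once, and then falls into case 7: to read the newest layout as a best effort. The same shape reduced to something printable (read_instdone() is invented):

#include <stdio.h>

static void
read_instdone(int gen)
{
        switch (gen) {
        case 2:
        case 3:
                puts("one INSTDONE register");
                break;
        case 4:
        case 5:
        case 6:
                puts("INSTDONE + INSTDONE1");
                break;
        default:
                fprintf(stderr, "unsupported gen %d, assuming gen7\n", gen);
                /* FALLTHROUGH */
        case 7:
                puts("four gen7 INSTDONE registers");
                break;
        }
}

int
main(void)
{
        read_instdone(9);       /* warns, then takes the gen7 path */
        return 0;
}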
890 | #ifdef CONFIG_DEBUG_FS | 894 | #ifdef CONFIG_DEBUG_FS | |
891 | static struct drm_i915_error_object * | 895 | static struct drm_i915_error_object * | |
892 | i915_error_object_create(struct drm_i915_private *dev_priv, | 896 | i915_error_object_create(struct drm_i915_private *dev_priv, | |
893 | struct drm_i915_gem_object *src) | 897 | struct drm_i915_gem_object *src) | |
894 | { | 898 | { | |
895 | struct drm_i915_error_object *dst; | 899 | struct drm_i915_error_object *dst; | |
896 | int i, count; | 900 | int i, count; | |
897 | u32 reloc_offset; | 901 | u32 reloc_offset; | |
898 | 902 | |||
899 | if (src == NULL || src->pages == NULL) | 903 | if (src == NULL || src->pages == NULL) | |
900 | return NULL; | 904 | return NULL; | |
901 | 905 | |||
902 | count = src->base.size / PAGE_SIZE; | 906 | count = src->base.size / PAGE_SIZE; | |
903 | 907 | |||
904 | dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); | 908 | dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); | |
905 | if (dst == NULL) | 909 | if (dst == NULL) | |
906 | return NULL; | 910 | return NULL; | |
907 | 911 | |||
908 | reloc_offset = src->gtt_offset; | 912 | reloc_offset = src->gtt_offset; | |
909 | for (i = 0; i < count; i++) { | 913 | for (i = 0; i < count; i++) { | |
910 | unsigned long flags; | 914 | unsigned long flags; | |
911 | void *d; | 915 | void *d; | |
912 | 916 | |||
913 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); | 917 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); | |
914 | if (d == NULL) | 918 | if (d == NULL) | |
915 | goto unwind; | 919 | goto unwind; | |
916 | 920 | |||
917 | local_irq_save(flags); | 921 | local_irq_save(flags); | |
918 | if (reloc_offset < dev_priv->mm.gtt_mappable_end && | 922 | if (reloc_offset < dev_priv->mm.gtt_mappable_end && | |
919 | src->has_global_gtt_mapping) { | 923 | src->has_global_gtt_mapping) { | |
920 | void __iomem *s; | 924 | void __iomem *s; | |
921 | 925 | |||
922 | /* Simply ignore tiling or any overlapping fence. | 926 | /* Simply ignore tiling or any overlapping fence. | |
923 | * It's part of the error state, and this hopefully | 927 | * It's part of the error state, and this hopefully | |
924 | * captures what the GPU read. | 928 | * captures what the GPU read. | |
925 | */ | 929 | */ | |
926 | 930 | |||
927 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 931 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | |
928 | reloc_offset); | 932 | reloc_offset); | |
929 | memcpy_fromio(d, s, PAGE_SIZE); | 933 | memcpy_fromio(d, s, PAGE_SIZE); | |
930 | io_mapping_unmap_atomic(s); | 934 | io_mapping_unmap_atomic(s); | |
931 | } else { | 935 | } else { | |
932 | struct page *page; | 936 | struct page *page; | |
933 | void *s; | 937 | void *s; | |
934 | 938 | |||
935 | page = i915_gem_object_get_page(src, i); | 939 | page = i915_gem_object_get_page(src, i); | |
936 | 940 | |||
937 | drm_clflush_pages(&page, 1); | 941 | drm_clflush_pages(&page, 1); | |
938 | 942 | |||
939 | s = kmap_atomic(page); | 943 | s = kmap_atomic(page); | |
940 | memcpy(d, s, PAGE_SIZE); | 944 | memcpy(d, s, PAGE_SIZE); | |
941 | kunmap_atomic(s); | 945 | kunmap_atomic(s); | |
942 | 946 | |||
943 | drm_clflush_pages(&page, 1); | 947 | drm_clflush_pages(&page, 1); | |
944 | } | 948 | } | |
945 | local_irq_restore(flags); | 949 | local_irq_restore(flags); | |
946 | 950 | |||
947 | dst->pages[i] = d; | 951 | dst->pages[i] = d; | |
948 | 952 | |||
949 | reloc_offset += PAGE_SIZE; | 953 | reloc_offset += PAGE_SIZE; | |
950 | } | 954 | } | |
951 | dst->page_count = count; | 955 | dst->page_count = count; | |
952 | dst->gtt_offset = src->gtt_offset; | 956 | dst->gtt_offset = src->gtt_offset; | |
953 | 957 | |||
954 | return dst; | 958 | return dst; | |
955 | 959 | |||
956 | unwind: | 960 | unwind: | |
957 | while (i--) | 961 | while (i--) | |
958 | kfree(dst->pages[i]); | 962 | kfree(dst->pages[i]); | |
959 | kfree(dst); | 963 | kfree(dst); | |
960 | return NULL; | 964 | return NULL; | |
961 | } | 965 | } | |
962 | 966 | |||
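The unwind: label above is the standard partial-failure pattern for a loop of allocations: on the first failed page, free exactly the pages already allocated (while (i--) visits i-1 down to 0 and never touches the failed slot), then free the container and report total failure. The same skeleton with only the allocation kept; alloc_page_array() is an invented helper:

#include <stdlib.h>

static void **
alloc_page_array(size_t count, size_t size)
{
        void **pages = malloc(count * sizeof(*pages));
        size_t i;

        if (pages == NULL)
                return NULL;
        for (i = 0; i < count; i++) {
                pages[i] = malloc(size);
                if (pages[i] == NULL)
                        goto unwind;
        }
        return pages;

unwind:
        while (i--)             /* frees slots i-1 .. 0 only */
                free(pages[i]);
        free(pages);
        return NULL;
}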
963 | static void | 967 | static void | |
964 | i915_error_object_free(struct drm_i915_error_object *obj) | 968 | i915_error_object_free(struct drm_i915_error_object *obj) | |
965 | { | 969 | { | |
966 | int page; | 970 | int page; | |
967 | 971 | |||
968 | if (obj == NULL) | 972 | if (obj == NULL) | |
969 | return; | 973 | return; | |
970 | 974 | |||
971 | for (page = 0; page < obj->page_count; page++) | 975 | for (page = 0; page < obj->page_count; page++) | |
972 | kfree(obj->pages[page]); | 976 | kfree(obj->pages[page]); | |
973 | 977 | |||
974 | kfree(obj); | 978 | kfree(obj); | |
975 | } | 979 | } | |
976 | 980 | |||
977 | void | 981 | void | |
978 | i915_error_state_free(struct kref *error_ref) | 982 | i915_error_state_free(struct kref *error_ref) | |
979 | { | 983 | { | |
980 | struct drm_i915_error_state *error = container_of(error_ref, | 984 | struct drm_i915_error_state *error = container_of(error_ref, | |
981 | typeof(*error), ref); | 985 | typeof(*error), ref); | |
982 | int i; | 986 | int i; | |
983 | 987 | |||
984 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { | 988 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { | |
985 | i915_error_object_free(error->ring[i].batchbuffer); | 989 | i915_error_object_free(error->ring[i].batchbuffer); | |
986 | i915_error_object_free(error->ring[i].ringbuffer); | 990 | i915_error_object_free(error->ring[i].ringbuffer); | |
987 | kfree(error->ring[i].requests); | 991 | kfree(error->ring[i].requests); | |
988 | } | 992 | } | |
989 | 993 | |||
990 | kfree(error->active_bo); | 994 | kfree(error->active_bo); | |
991 | kfree(error->overlay); | 995 | kfree(error->overlay); | |
992 | kfree(error); | 996 | kfree(error); | |
993 | } | 997 | } | |
994 | static void capture_bo(struct drm_i915_error_buffer *err, | 998 | static void capture_bo(struct drm_i915_error_buffer *err, | |
995 | struct drm_i915_gem_object *obj) | 999 | struct drm_i915_gem_object *obj) | |
996 | { | 1000 | { | |
997 | err->size = obj->base.size; | 1001 | err->size = obj->base.size; | |
998 | err->name = obj->base.name; | 1002 | err->name = obj->base.name; | |
999 | err->rseqno = obj->last_read_seqno; | 1003 | err->rseqno = obj->last_read_seqno; | |
1000 | err->wseqno = obj->last_write_seqno; | 1004 | err->wseqno = obj->last_write_seqno; | |
1001 | err->gtt_offset = obj->gtt_offset; | 1005 | err->gtt_offset = obj->gtt_offset; | |
1002 | err->read_domains = obj->base.read_domains; | 1006 | err->read_domains = obj->base.read_domains; | |
1003 | err->write_domain = obj->base.write_domain; | 1007 | err->write_domain = obj->base.write_domain; | |
1004 | err->fence_reg = obj->fence_reg; | 1008 | err->fence_reg = obj->fence_reg; | |
1005 | err->pinned = 0; | 1009 | err->pinned = 0; | |
1006 | if (obj->pin_count > 0) | 1010 | if (obj->pin_count > 0) | |
1007 | err->pinned = 1; | 1011 | err->pinned = 1; | |
1008 | if (obj->user_pin_count > 0) | 1012 | if (obj->user_pin_count > 0) | |
1009 | err->pinned = -1; | 1013 | err->pinned = -1; | |
1010 | err->tiling = obj->tiling_mode; | 1014 | err->tiling = obj->tiling_mode; | |
1011 | err->dirty = obj->dirty; | 1015 | err->dirty = obj->dirty; | |
1012 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | 1016 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | |
1013 | err->ring = obj->ring ? obj->ring->id : -1; | 1017 | err->ring = obj->ring ? obj->ring->id : -1; | |
1014 | err->cache_level = obj->cache_level; | 1018 | err->cache_level = obj->cache_level; | |
1015 | } | 1019 | } | |
1016 | 1020 | |||
1017 | static u32 capture_active_bo(struct drm_i915_error_buffer *err, | 1021 | static u32 capture_active_bo(struct drm_i915_error_buffer *err, | |
1018 | int count, struct list_head *head) | 1022 | int count, struct list_head *head) | |
1019 | { | 1023 | { | |
1020 | struct drm_i915_gem_object *obj; | 1024 | struct drm_i915_gem_object *obj; | |
1021 | int i = 0; | 1025 | int i = 0; | |
1022 | 1026 | |||
1023 | list_for_each_entry(obj, head, mm_list) { | 1027 | list_for_each_entry(obj, head, mm_list) { | |
1024 | capture_bo(err++, obj); | 1028 | capture_bo(err++, obj); | |
1025 | if (++i == count) | 1029 | if (++i == count) | |
1026 | break; | 1030 | break; | |
1027 | } | 1031 | } | |
1028 | 1032 | |||
1029 | return i; | 1033 | return i; | |
1030 | } | 1034 | } | |
1031 | 1035 | |||
1032 | static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, | 1036 | static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, | |
1033 | int count, struct list_head *head) | 1037 | int count, struct list_head *head) | |
1034 | { | 1038 | { | |
1035 | struct drm_i915_gem_object *obj; | 1039 | struct drm_i915_gem_object *obj; | |
1036 | int i = 0; | 1040 | int i = 0; | |
1037 | 1041 | |||
1038 | list_for_each_entry(obj, head, gtt_list) { | 1042 | list_for_each_entry(obj, head, gtt_list) { | |
1039 | if (obj->pin_count == 0) | 1043 | if (obj->pin_count == 0) | |
1040 | continue; | 1044 | continue; | |
1041 | 1045 | |||
1042 | capture_bo(err++, obj); | 1046 | capture_bo(err++, obj); | |
1043 | if (++i == count) | 1047 | if (++i == count) | |
1044 | break; | 1048 | break; | |
1045 | } | 1049 | } | |
1046 | 1050 | |||
1047 | return i; | 1051 | return i; | |
1048 | } | 1052 | } | |
1049 | 1053 | |||
1050 | static void i915_gem_record_fences(struct drm_device *dev, | 1054 | static void i915_gem_record_fences(struct drm_device *dev, | |
1051 | struct drm_i915_error_state *error) | 1055 | struct drm_i915_error_state *error) | |
1052 | { | 1056 | { | |
1053 | struct drm_i915_private *dev_priv = dev->dev_private; | 1057 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1054 | int i; | 1058 | int i; | |
1055 | 1059 | |||
1056 | /* Fences */ | 1060 | /* Fences */ | |
1057 | switch (INTEL_INFO(dev)->gen) { | 1061 | switch (INTEL_INFO(dev)->gen) { | |
1058 | case 7: | 1062 | case 7: | |
1059 | case 6: | 1063 | case 6: | |
1060 | for (i = 0; i < 16; i++) | 1064 | for (i = 0; i < 16; i++) | |
1061 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | 1065 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | |
1062 | break; | 1066 | break; | |
1063 | case 5: | 1067 | case 5: | |
1064 | case 4: | 1068 | case 4: | |
1065 | for (i = 0; i < 16; i++) | 1069 | for (i = 0; i < 16; i++) | |
1066 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | 1070 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | |
1067 | break; | 1071 | break; | |
1068 | case 3: | 1072 | case 3: | |
1069 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 1073 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | |
1070 | for (i = 0; i < 8; i++) | 1074 | for (i = 0; i < 8; i++) | |
1071 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | 1075 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | |
1072 | case 2: | 1076 | case 2: | |
1073 | for (i = 0; i < 8; i++) | 1077 | for (i = 0; i < 8; i++) | |
1074 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | 1078 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | |
1075 | break; | 1079 | break; | |
1076 | 1080 | |||
1077 | } | 1081 | } | |
1078 | } | 1082 | } | |
1079 | 1083 | |||
1080 | static struct drm_i915_error_object * | 1084 | static struct drm_i915_error_object * | |
1081 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | 1085 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |
1082 | struct intel_ring_buffer *ring) | 1086 | struct intel_ring_buffer *ring) | |
1083 | { | 1087 | { | |
1084 | struct drm_i915_gem_object *obj; | 1088 | struct drm_i915_gem_object *obj; | |
1085 | u32 seqno; | 1089 | u32 seqno; | |
1086 | 1090 | |||
1087 | if (!ring->get_seqno) | 1091 | if (!ring->get_seqno) | |
1088 | return NULL; | 1092 | return NULL; | |
1089 | 1093 | |||
1090 | if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { | 1094 | if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { | |
1091 | u32 acthd = I915_READ(ACTHD); | 1095 | u32 acthd = I915_READ(ACTHD); | |
1092 | 1096 | |||
1093 | if (WARN_ON(ring->id != RCS)) | 1097 | if (WARN_ON(ring->id != RCS)) | |
1094 | return NULL; | 1098 | return NULL; | |
1095 | 1099 | |||
1096 | obj = ring->private; | 1100 | obj = ring->private; | |
1097 | if (acthd >= obj->gtt_offset && | 1101 | if (acthd >= obj->gtt_offset && | |
1098 | acthd < obj->gtt_offset + obj->base.size) | 1102 | acthd < obj->gtt_offset + obj->base.size) | |
1099 | return i915_error_object_create(dev_priv, obj); | 1103 | return i915_error_object_create(dev_priv, obj); | |
1100 | } | 1104 | } | |
1101 | 1105 | |||
1102 | seqno = ring->get_seqno(ring, false); | 1106 | seqno = ring->get_seqno(ring, false); | |
1103 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | 1107 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | |
1104 | if (obj->ring != ring) | 1108 | if (obj->ring != ring) | |
1105 | continue; | 1109 | continue; | |
1106 | 1110 | |||
1107 | if (i915_seqno_passed(seqno, obj->last_read_seqno)) | 1111 | if (i915_seqno_passed(seqno, obj->last_read_seqno)) | |
1108 | continue; | 1112 | continue; | |
1109 | 1113 | |||
1110 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | 1114 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | |
1111 | continue; | 1115 | continue; | |
1112 | 1116 | |||
1113 | /* We need to copy these to an anonymous buffer as the simplest | 1117 | /* We need to copy these to an anonymous buffer as the simplest | |
1114 | * method to avoid being overwritten by userspace. | 1118 | * method to avoid being overwritten by userspace. | |
1115 | */ | 1119 | */ | |
1116 | return i915_error_object_create(dev_priv, obj); | 1120 | return i915_error_object_create(dev_priv, obj); | |
1117 | } | 1121 | } | |
1118 | 1122 | |||
1119 | return NULL; | 1123 | return NULL; | |
1120 | } | 1124 | } | |
1121 | 1125 | |||
1122 | static void i915_record_ring_state(struct drm_device *dev, | 1126 | static void i915_record_ring_state(struct drm_device *dev, | |
1123 | struct drm_i915_error_state *error, | 1127 | struct drm_i915_error_state *error, | |
1124 | struct intel_ring_buffer *ring) | 1128 | struct intel_ring_buffer *ring) | |
1125 | { | 1129 | { | |
1126 | struct drm_i915_private *dev_priv = dev->dev_private; | 1130 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1127 | 1131 | |||
1128 | if (INTEL_INFO(dev)->gen >= 6) { | 1132 | if (INTEL_INFO(dev)->gen >= 6) { | |
1129 | error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); | 1133 | error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); | |
1130 | error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); | 1134 | error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); | |
1131 | error->semaphore_mboxes[ring->id][0] | 1135 | error->semaphore_mboxes[ring->id][0] | |
1132 | = I915_READ(RING_SYNC_0(ring->mmio_base)); | 1136 | = I915_READ(RING_SYNC_0(ring->mmio_base)); | |
1133 | error->semaphore_mboxes[ring->id][1] | 1137 | error->semaphore_mboxes[ring->id][1] | |
1134 | = I915_READ(RING_SYNC_1(ring->mmio_base)); | 1138 | = I915_READ(RING_SYNC_1(ring->mmio_base)); | |
1135 | error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; | 1139 | error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; | |
1136 | error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; | 1140 | error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; | |
1137 | } | 1141 | } | |
1138 | 1142 | |||
1139 | if (INTEL_INFO(dev)->gen >= 4) { | 1143 | if (INTEL_INFO(dev)->gen >= 4) { | |
1140 | error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); | 1144 | error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); | |
1141 | error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); | 1145 | error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); | |
1142 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); | 1146 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); | |
1143 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); | 1147 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); | |
1144 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); | 1148 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); | |
1145 | if (ring->id == RCS) | 1149 | if (ring->id == RCS) | |
1146 | error->bbaddr = I915_READ64(BB_ADDR); | 1150 | error->bbaddr = I915_READ64(BB_ADDR); | |
1147 | } else { | 1151 | } else { | |
1148 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); | 1152 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); | |
1149 | error->ipeir[ring->id] = I915_READ(IPEIR); | 1153 | error->ipeir[ring->id] = I915_READ(IPEIR); | |
1150 | error->ipehr[ring->id] = I915_READ(IPEHR); | 1154 | error->ipehr[ring->id] = I915_READ(IPEHR); | |
1151 | error->instdone[ring->id] = I915_READ(INSTDONE); | 1155 | error->instdone[ring->id] = I915_READ(INSTDONE); | |
1152 | } | 1156 | } | |
1153 | 1157 | |||
1158 | #ifdef __NetBSD__ | |||
1159 | error->waiting[ring->id] = DRM_WAITERS_P(&ring->irq_queue, | |||
1160 | &drm_global_mutex); | |||
1161 | #else | |||
1154 | error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); | 1162 | error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); | |
1163 | #endif | |||
1155 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); | 1164 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); | |
1156 | error->seqno[ring->id] = ring->get_seqno(ring, false); | 1165 | error->seqno[ring->id] = ring->get_seqno(ring, false); | |
1157 | error->acthd[ring->id] = intel_ring_get_active_head(ring); | 1166 | error->acthd[ring->id] = intel_ring_get_active_head(ring); | |
1158 | error->head[ring->id] = I915_READ_HEAD(ring); | 1167 | error->head[ring->id] = I915_READ_HEAD(ring); | |
1159 | error->tail[ring->id] = I915_READ_TAIL(ring); | 1168 | error->tail[ring->id] = I915_READ_TAIL(ring); | |
1160 | error->ctl[ring->id] = I915_READ_CTL(ring); | 1169 | error->ctl[ring->id] = I915_READ_CTL(ring); | |
1161 | 1170 | |||
1162 | error->cpu_ring_head[ring->id] = ring->head; | 1171 | error->cpu_ring_head[ring->id] = ring->head; | |
1163 | error->cpu_ring_tail[ring->id] = ring->tail; | 1172 | error->cpu_ring_tail[ring->id] = ring->tail; | |
1164 | } | 1173 | } | |
1165 | 1174 | |||
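And this is the revision's second hunk: Linux's lockless waitqueue_active() peek becomes DRM_WAITERS_P(), again paired with &drm_global_mutex. Extending the pthread model from the notify_ring() sketch above; the nwaiters counter and wq_waiters_p() are invented names, and the assumption that the macro takes the interlock so it can read a waiter count consistently is mine:

#include <pthread.h>
#include <stdbool.h>

struct wq {
        pthread_mutex_t *interlock;     /* models &drm_global_mutex */
        pthread_cond_t cv;
        unsigned nwaiters;              /* bumped/dropped around the wait */
};

/* models DRM_WAITERS_P(): is anyone currently asleep on this queue? */
static bool
wq_waiters_p(struct wq *wq)
{
        bool active;

        pthread_mutex_lock(wq->interlock);
        active = (wq->nwaiters != 0);
        pthread_mutex_unlock(wq->interlock);
        return active;
}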
1166 | static void i915_gem_record_rings(struct drm_device *dev, | 1175 | static void i915_gem_record_rings(struct drm_device *dev, | |
1167 | struct drm_i915_error_state *error) | 1176 | struct drm_i915_error_state *error) | |
1168 | { | 1177 | { | |
1169 | struct drm_i915_private *dev_priv = dev->dev_private; | 1178 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1170 | struct intel_ring_buffer *ring; | 1179 | struct intel_ring_buffer *ring; | |
1171 | struct drm_i915_gem_request *request; | 1180 | struct drm_i915_gem_request *request; | |
1172 | int i, count; | 1181 | int i, count; | |
1173 | 1182 | |||
1174 | for_each_ring(ring, dev_priv, i) { | 1183 | for_each_ring(ring, dev_priv, i) { | |
1175 | i915_record_ring_state(dev, error, ring); | 1184 | i915_record_ring_state(dev, error, ring); | |
1176 | 1185 | |||
1177 | error->ring[i].batchbuffer = | 1186 | error->ring[i].batchbuffer = | |
1178 | i915_error_first_batchbuffer(dev_priv, ring); | 1187 | i915_error_first_batchbuffer(dev_priv, ring); | |
1179 | 1188 | |||
1180 | error->ring[i].ringbuffer = | 1189 | error->ring[i].ringbuffer = | |
1181 | i915_error_object_create(dev_priv, ring->obj); | 1190 | i915_error_object_create(dev_priv, ring->obj); | |
1182 | 1191 | |||
1183 | count = 0; | 1192 | count = 0; | |
1184 | list_for_each_entry(request, &ring->request_list, list) | 1193 | list_for_each_entry(request, &ring->request_list, list) | |
1185 | count++; | 1194 | count++; | |
1186 | 1195 | |||
1187 | error->ring[i].num_requests = count; | 1196 | error->ring[i].num_requests = count; | |
1188 | error->ring[i].requests = | 1197 | error->ring[i].requests = | |
1189 | kmalloc(count*sizeof(struct drm_i915_error_request), | 1198 | kmalloc(count*sizeof(struct drm_i915_error_request), | |
1190 | GFP_ATOMIC); | 1199 | GFP_ATOMIC); | |
1191 | if (error->ring[i].requests == NULL) { | 1200 | if (error->ring[i].requests == NULL) { | |
1192 | error->ring[i].num_requests = 0; | 1201 | error->ring[i].num_requests = 0; | |
1193 | continue; | 1202 | continue; | |
1194 | } | 1203 | } | |
1195 | 1204 | |||
1196 | count = 0; | 1205 | count = 0; | |
1197 | list_for_each_entry(request, &ring->request_list, list) { | 1206 | list_for_each_entry(request, &ring->request_list, list) { | |
1198 | struct drm_i915_error_request *erq; | 1207 | struct drm_i915_error_request *erq; | |
1199 | 1208 | |||
1200 | erq = &error->ring[i].requests[count++]; | 1209 | erq = &error->ring[i].requests[count++]; | |
1201 | erq->seqno = request->seqno; | 1210 | erq->seqno = request->seqno; | |
1202 | erq->jiffies = request->emitted_jiffies; | 1211 | erq->jiffies = request->emitted_jiffies; | |
1203 | erq->tail = request->tail; | 1212 | erq->tail = request->tail; | |
1204 | } | 1213 | } | |
1205 | } | 1214 | } | |
1206 | } | 1215 | } | |
1207 | 1216 | |||
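The request capture above is a two-pass list walk: count, allocate once (GFP_ATOMIC, since this can run from error context), then walk again to fill; when the allocation fails, it records zero requests and keeps capturing the rest rather than aborting. Reduced to a singly linked list in a standalone sketch (types and names invented):

#include <stdlib.h>

struct req {
        int seqno;
        struct req *next;
};

static int *
snapshot_seqnos(const struct req *head, int *out_n)
{
        const struct req *r;
        int *snap;
        int n = 0, i = 0;

        for (r = head; r != NULL; r = r->next)  /* pass 1: count */
                n++;
        snap = malloc(n * sizeof(*snap));
        if (snap == NULL) {
                *out_n = 0;     /* degrade gracefully, as the driver does */
                return NULL;
        }
        for (r = head; r != NULL; r = r->next)  /* pass 2: fill */
                snap[i++] = r->seqno;
        *out_n = n;
        return snap;
}

This pattern assumes the list cannot change between the two passes, which the error-capture path relies on.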
1208 | /** | 1217 | /** | |
1209 | * i915_capture_error_state - capture an error record for later analysis | 1218 | * i915_capture_error_state - capture an error record for later analysis | |
1210 | * @dev: drm device | 1219 | * @dev: drm device | |
1211 | * | 1220 | * | |
1212 | * Should be called when an error is detected (either a hang or an error | 1221 | * Should be called when an error is detected (either a hang or an error | |
1213 | * interrupt) to capture error state from the time of the error. Fills | 1222 | * interrupt) to capture error state from the time of the error. Fills | |
1214 | * out a structure which becomes available in debugfs for user level tools | 1223 | * out a structure which becomes available in debugfs for user level tools | |
1215 | * to pick up. | 1224 | * to pick up. | |
1216 | */ | 1225 | */ | |
1217 | static void i915_capture_error_state(struct drm_device *dev) | 1226 | static void i915_capture_error_state(struct drm_device *dev) | |
1218 | { | 1227 | { | |
1219 | struct drm_i915_private *dev_priv = dev->dev_private; | 1228 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1220 | struct drm_i915_gem_object *obj; | 1229 | struct drm_i915_gem_object *obj; | |
1221 | struct drm_i915_error_state *error; | 1230 | struct drm_i915_error_state *error; | |
1222 | unsigned long flags; | 1231 | unsigned long flags; | |
1223 | int i, pipe; | 1232 | int i, pipe; | |
1224 | 1233 | |||
1225 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 1234 | spin_lock_irqsave(&dev_priv->error_lock, flags); | |
1226 | error = dev_priv->first_error; | 1235 | error = dev_priv->first_error; | |
1227 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 1236 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | |
1228 | if (error) | 1237 | if (error) | |
1229 | return; | 1238 | return; | |
1230 | 1239 | |||
1231 | /* Account for pipe specific data like PIPE*STAT */ | 1240 | /* Account for pipe specific data like PIPE*STAT */ | |
1232 | error = kzalloc(sizeof(*error), GFP_ATOMIC); | 1241 | error = kzalloc(sizeof(*error), GFP_ATOMIC); | |
1233 | if (!error) { | 1242 | if (!error) { | |
1234 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | 1243 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | |
1235 | return; | 1244 | return; | |
1236 | } | 1245 | } | |
1237 | 1246 | |||
1238 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", | 1247 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", | |
1239 | dev->primary->index); | 1248 | dev->primary->index); | |
1240 | 1249 | |||
1241 | kref_init(&error->ref); | 1250 | kref_init(&error->ref); | |
1242 | error->eir = I915_READ(EIR); | 1251 | error->eir = I915_READ(EIR); | |
1243 | error->pgtbl_er = I915_READ(PGTBL_ER); | 1252 | error->pgtbl_er = I915_READ(PGTBL_ER); | |
1244 | error->ccid = I915_READ(CCID); | 1253 | error->ccid = I915_READ(CCID); | |
1245 | 1254 | |||
1246 | if (HAS_PCH_SPLIT(dev)) | 1255 | if (HAS_PCH_SPLIT(dev)) | |
1247 | error->ier = I915_READ(DEIER) | I915_READ(GTIER); | 1256 | error->ier = I915_READ(DEIER) | I915_READ(GTIER); | |
1248 | else if (IS_VALLEYVIEW(dev)) | 1257 | else if (IS_VALLEYVIEW(dev)) | |
1249 | error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); | 1258 | error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); | |
1250 | else if (IS_GEN2(dev)) | 1259 | else if (IS_GEN2(dev)) | |
1251 | error->ier = I915_READ16(IER); | 1260 | error->ier = I915_READ16(IER); | |
1252 | else | 1261 | else | |
1253 | error->ier = I915_READ(IER); | 1262 | error->ier = I915_READ(IER); | |
1254 | 1263 | |||
1255 | if (INTEL_INFO(dev)->gen >= 6) | 1264 | if (INTEL_INFO(dev)->gen >= 6) | |
1256 | error->derrmr = I915_READ(DERRMR); | 1265 | error->derrmr = I915_READ(DERRMR); | |
1257 | 1266 | |||
1258 | if (IS_VALLEYVIEW(dev)) | 1267 | if (IS_VALLEYVIEW(dev)) | |
1259 | error->forcewake = I915_READ(FORCEWAKE_VLV); | 1268 | error->forcewake = I915_READ(FORCEWAKE_VLV); | |
1260 | else if (INTEL_INFO(dev)->gen >= 7) | 1269 | else if (INTEL_INFO(dev)->gen >= 7) | |
1261 | error->forcewake = I915_READ(FORCEWAKE_MT); | 1270 | error->forcewake = I915_READ(FORCEWAKE_MT); | |
1262 | else if (INTEL_INFO(dev)->gen == 6) | 1271 | else if (INTEL_INFO(dev)->gen == 6) | |
1263 | error->forcewake = I915_READ(FORCEWAKE); | 1272 | error->forcewake = I915_READ(FORCEWAKE); | |
1264 | 1273 | |||
1265 | for_each_pipe(pipe) | 1274 | for_each_pipe(pipe) | |
1266 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); | 1275 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); | |
1267 | 1276 | |||
1268 | if (INTEL_INFO(dev)->gen >= 6) { | 1277 | if (INTEL_INFO(dev)->gen >= 6) { | |
1269 | error->error = I915_READ(ERROR_GEN6); | 1278 | error->error = I915_READ(ERROR_GEN6); | |
1270 | error->done_reg = I915_READ(DONE_REG); | 1279 | error->done_reg = I915_READ(DONE_REG); | |
1271 | } | 1280 | } | |
1272 | 1281 | |||
1273 | if (INTEL_INFO(dev)->gen == 7) | 1282 | if (INTEL_INFO(dev)->gen == 7) | |
1274 | error->err_int = I915_READ(GEN7_ERR_INT); | 1283 | error->err_int = I915_READ(GEN7_ERR_INT); | |
1275 | 1284 | |||
1276 | i915_get_extra_instdone(dev, error->extra_instdone); | 1285 | i915_get_extra_instdone(dev, error->extra_instdone); | |
1277 | 1286 | |||
1278 | i915_gem_record_fences(dev, error); | 1287 | i915_gem_record_fences(dev, error); | |
1279 | i915_gem_record_rings(dev, error); | 1288 | i915_gem_record_rings(dev, error); | |
1280 | 1289 | |||
1281 | /* Record buffers on the active and pinned lists. */ | 1290 | /* Record buffers on the active and pinned lists. */ | |
1282 | error->active_bo = NULL; | 1291 | error->active_bo = NULL; | |
1283 | error->pinned_bo = NULL; | 1292 | error->pinned_bo = NULL; | |
1284 | 1293 | |||
1285 | i = 0; | 1294 | i = 0; | |
1286 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) | 1295 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) | |
1287 | i++; | 1296 | i++; | |
1288 | error->active_bo_count = i; | 1297 | error->active_bo_count = i; | |
1289 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) | 1298 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) | |
1290 | if (obj->pin_count) | 1299 | if (obj->pin_count) | |
1291 | i++; | 1300 | i++; | |
1292 | error->pinned_bo_count = i - error->active_bo_count; | 1301 | error->pinned_bo_count = i - error->active_bo_count; | |
1293 | 1302 | |||
1294 | error->active_bo = NULL; | 1303 | error->active_bo = NULL; | |
1295 | error->pinned_bo = NULL; | 1304 | error->pinned_bo = NULL; | |
1296 | if (i) { | 1305 | if (i) { | |
1297 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, | 1306 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, | |
1298 | GFP_ATOMIC); | 1307 | GFP_ATOMIC); | |
1299 | if (error->active_bo) | 1308 | if (error->active_bo) | |
1300 | error->pinned_bo = | 1309 | error->pinned_bo = | |
1301 | error->active_bo + error->active_bo_count; | 1310 | error->active_bo + error->active_bo_count; | |
1302 | } | 1311 | } | |
1303 | 1312 | |||
1304 | if (error->active_bo) | 1313 | if (error->active_bo) | |
1305 | error->active_bo_count = | 1314 | error->active_bo_count = | |
1306 | capture_active_bo(error->active_bo, | 1315 | capture_active_bo(error->active_bo, | |
1307 | error->active_bo_count, | 1316 | error->active_bo_count, | |
1308 | &dev_priv->mm.active_list); | 1317 | &dev_priv->mm.active_list); | |
1309 | 1318 | |||
1310 | if (error->pinned_bo) | 1319 | if (error->pinned_bo) | |
1311 | error->pinned_bo_count = | 1320 | error->pinned_bo_count = | |
1312 | capture_pinned_bo(error->pinned_bo, | 1321 | capture_pinned_bo(error->pinned_bo, | |
1313 | error->pinned_bo_count, | 1322 | error->pinned_bo_count, | |
1314 | &dev_priv->mm.bound_list); | 1323 | &dev_priv->mm.bound_list); | |
1315 | 1324 | |||
1316 | do_gettimeofday(&error->time); | 1325 | do_gettimeofday(&error->time); | |
1317 | 1326 | |||
1318 | error->overlay = intel_overlay_capture_error_state(dev); | 1327 | error->overlay = intel_overlay_capture_error_state(dev); | |
1319 | error->display = intel_display_capture_error_state(dev); | 1328 | error->display = intel_display_capture_error_state(dev); | |
1320 | 1329 | |||
1321 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 1330 | spin_lock_irqsave(&dev_priv->error_lock, flags); | |
1322 | if (dev_priv->first_error == NULL) { | 1331 | if (dev_priv->first_error == NULL) { | |
1323 | dev_priv->first_error = error; | 1332 | dev_priv->first_error = error; | |
1324 | error = NULL; | 1333 | error = NULL; | |
1325 | } | 1334 | } | |
1326 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 1335 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | |
1327 | 1336 | |||
1328 | if (error) | 1337 | if (error) | |
1329 | i915_error_state_free(&error->ref); | 1338 | i915_error_state_free(&error->ref); | |
1330 | } | 1339 | } | |
1331 | 1340 | |||
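i915_capture_error_state() above keeps at most one error record: it bails out early if dev_priv->first_error is already set, captures registers into a fresh allocation outside the lock, then publishes the record only if the slot is still empty, freeing it if another CPU won the race. A minimal userspace model of that publish pattern, with pthreads standing in for spin_lock_irqsave() and every name illustrative rather than taken from the driver:

	#include <pthread.h>
	#include <stdlib.h>

	struct err_state { unsigned eir; };	/* stand-in for drm_i915_error_state */

	static struct err_state *first_error;
	static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;

	static void
	capture_error(unsigned eir)
	{
		struct err_state *e;

		pthread_mutex_lock(&error_lock);	/* cheap early exit */
		e = first_error;
		pthread_mutex_unlock(&error_lock);
		if (e != NULL)
			return;

		e = calloc(1, sizeof(*e));		/* capture outside the lock */
		if (e == NULL)
			return;
		e->eir = eir;

		pthread_mutex_lock(&error_lock);	/* publish only if still first */
		if (first_error == NULL) {
			first_error = e;
			e = NULL;
		}
		pthread_mutex_unlock(&error_lock);
		free(e);				/* no-op if we won the race */
	}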
1332 | void i915_destroy_error_state(struct drm_device *dev) | 1341 | void i915_destroy_error_state(struct drm_device *dev) | |
1333 | { | 1342 | { | |
1334 | struct drm_i915_private *dev_priv = dev->dev_private; | 1343 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1335 | struct drm_i915_error_state *error; | 1344 | struct drm_i915_error_state *error; | |
1336 | unsigned long flags; | 1345 | unsigned long flags; | |
1337 | 1346 | |||
1338 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 1347 | spin_lock_irqsave(&dev_priv->error_lock, flags); | |
1339 | error = dev_priv->first_error; | 1348 | error = dev_priv->first_error; | |
1340 | dev_priv->first_error = NULL; | 1349 | dev_priv->first_error = NULL; | |
1341 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 1350 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | |
1342 | 1351 | |||
1343 | if (error) | 1352 | if (error) | |
1344 | kref_put(&error->ref, i915_error_state_free); | 1353 | kref_put(&error->ref, i915_error_state_free); | |
1345 | } | 1354 | } | |
1346 | #else | 1355 | #else | |
1347 | #define i915_capture_error_state(x) | 1356 | #define i915_capture_error_state(x) | |
1348 | #endif | 1357 | #endif | |
1349 | 1358 | |||
1350 | static void i915_report_and_clear_eir(struct drm_device *dev) | 1359 | static void i915_report_and_clear_eir(struct drm_device *dev) | |
1351 | { | 1360 | { | |
1352 | struct drm_i915_private *dev_priv = dev->dev_private; | 1361 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1353 | uint32_t instdone[I915_NUM_INSTDONE_REG]; | 1362 | uint32_t instdone[I915_NUM_INSTDONE_REG]; | |
1354 | u32 eir = I915_READ(EIR); | 1363 | u32 eir = I915_READ(EIR); | |
1355 | int pipe, i; | 1364 | int pipe, i; | |
1356 | 1365 | |||
1357 | if (!eir) | 1366 | if (!eir) | |
1358 | return; | 1367 | return; | |
1359 | 1368 | |||
1360 | pr_err("render error detected, EIR: 0x%08x\n", eir); | 1369 | pr_err("render error detected, EIR: 0x%08x\n", eir); | |
1361 | 1370 | |||
1362 | i915_get_extra_instdone(dev, instdone); | 1371 | i915_get_extra_instdone(dev, instdone); | |
1363 | 1372 | |||
1364 | if (IS_G4X(dev)) { | 1373 | if (IS_G4X(dev)) { | |
1365 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | 1374 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | |
1366 | u32 ipeir = I915_READ(IPEIR_I965); | 1375 | u32 ipeir = I915_READ(IPEIR_I965); | |
1367 | 1376 | |||
1368 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); | 1377 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); | |
1369 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | 1378 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
1370 | for (i = 0; i < ARRAY_SIZE(instdone); i++) | 1379 | for (i = 0; i < ARRAY_SIZE(instdone); i++) | |
1371 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | 1380 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
1372 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); | 1381 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); | |
1373 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); | 1382 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); | |
1374 | I915_WRITE(IPEIR_I965, ipeir); | 1383 | I915_WRITE(IPEIR_I965, ipeir); | |
1375 | POSTING_READ(IPEIR_I965); | 1384 | POSTING_READ(IPEIR_I965); | |
1376 | } | 1385 | } | |
1377 | if (eir & GM45_ERROR_PAGE_TABLE) { | 1386 | if (eir & GM45_ERROR_PAGE_TABLE) { | |
1378 | u32 pgtbl_err = I915_READ(PGTBL_ER); | 1387 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
1379 | pr_err("page table error\n"); | 1388 | pr_err("page table error\n"); | |
1380 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | 1389 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
1381 | I915_WRITE(PGTBL_ER, pgtbl_err); | 1390 | I915_WRITE(PGTBL_ER, pgtbl_err); | |
1382 | POSTING_READ(PGTBL_ER); | 1391 | POSTING_READ(PGTBL_ER); | |
1383 | } | 1392 | } | |
1384 | } | 1393 | } | |
1385 | 1394 | |||
1386 | if (!IS_GEN2(dev)) { | 1395 | if (!IS_GEN2(dev)) { | |
1387 | if (eir & I915_ERROR_PAGE_TABLE) { | 1396 | if (eir & I915_ERROR_PAGE_TABLE) { | |
1388 | u32 pgtbl_err = I915_READ(PGTBL_ER); | 1397 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
1389 | pr_err("page table error\n"); | 1398 | pr_err("page table error\n"); | |
1390 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | 1399 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
1391 | I915_WRITE(PGTBL_ER, pgtbl_err); | 1400 | I915_WRITE(PGTBL_ER, pgtbl_err); | |
1392 | POSTING_READ(PGTBL_ER); | 1401 | POSTING_READ(PGTBL_ER); | |
1393 | } | 1402 | } | |
1394 | } | 1403 | } | |
1395 | 1404 | |||
1396 | if (eir & I915_ERROR_MEMORY_REFRESH) { | 1405 | if (eir & I915_ERROR_MEMORY_REFRESH) { | |
1397 | pr_err("memory refresh error:\n"); | 1406 | pr_err("memory refresh error:\n"); | |
1398 | for_each_pipe(pipe) | 1407 | for_each_pipe(pipe) | |
1399 | pr_err("pipe %c stat: 0x%08x\n", | 1408 | pr_err("pipe %c stat: 0x%08x\n", | |
1400 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); | 1409 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); | |
1401 | /* pipestat has already been acked */ | 1410 | /* pipestat has already been acked */ | |
1402 | } | 1411 | } | |
1403 | if (eir & I915_ERROR_INSTRUCTION) { | 1412 | if (eir & I915_ERROR_INSTRUCTION) { | |
1404 | pr_err("instruction error\n"); | 1413 | pr_err("instruction error\n"); | |
1405 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | 1414 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | |
1406 | for (i = 0; i < ARRAY_SIZE(instdone); i++) | 1415 | for (i = 0; i < ARRAY_SIZE(instdone); i++) | |
1407 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | 1416 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
1408 | if (INTEL_INFO(dev)->gen < 4) { | 1417 | if (INTEL_INFO(dev)->gen < 4) { | |
1409 | u32 ipeir = I915_READ(IPEIR); | 1418 | u32 ipeir = I915_READ(IPEIR); | |
1410 | 1419 | |||
1411 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); | 1420 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); | |
1412 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | 1421 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | |
1413 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); | 1422 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); | |
1414 | I915_WRITE(IPEIR, ipeir); | 1423 | I915_WRITE(IPEIR, ipeir); | |
1415 | POSTING_READ(IPEIR); | 1424 | POSTING_READ(IPEIR); | |
1416 | } else { | 1425 | } else { | |
1417 | u32 ipeir = I915_READ(IPEIR_I965); | 1426 | u32 ipeir = I915_READ(IPEIR_I965); | |
1418 | 1427 | |||
1419 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); | 1428 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); | |
1420 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | 1429 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
1421 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); | 1430 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); | |
1422 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); | 1431 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); | |
1423 | I915_WRITE(IPEIR_I965, ipeir); | 1432 | I915_WRITE(IPEIR_I965, ipeir); | |
1424 | POSTING_READ(IPEIR_I965); | 1433 | POSTING_READ(IPEIR_I965); | |
1425 | } | 1434 | } | |
1426 | } | 1435 | } | |
1427 | 1436 | |||
1428 | I915_WRITE(EIR, eir); | 1437 | I915_WRITE(EIR, eir); | |
1429 | POSTING_READ(EIR); | 1438 | POSTING_READ(EIR); | |
1430 | eir = I915_READ(EIR); | 1439 | eir = I915_READ(EIR); | |
1431 | if (eir) { | 1440 | if (eir) { | |
1432 | /* | 1441 | /* | |
1433 | * some errors might have become stuck, | 1442 | * some errors might have become stuck, | |
1434 | * mask them. | 1443 | * mask them. | |
1435 | */ | 1444 | */ | |
1436 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | 1445 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | |
1437 | I915_WRITE(EMR, I915_READ(EMR) | eir); | 1446 | I915_WRITE(EMR, I915_READ(EMR) | eir); | |
1438 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 1447 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
1439 | } | 1448 | } | |
1440 | } | 1449 | } | |
1441 | 1450 | |||
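The tail of i915_report_and_clear_eir() is a write-to-clear-then-mask idiom: ack every reported error bit, re-read EIR, and treat anything still set as stuck, masking it via EMR so it cannot keep re-raising the error interrupt. A sketch of just that idiom; reg_read()/reg_write() and the register ids are assumed stand-ins for I915_READ()/I915_WRITE(), not driver functions:

	#include <stdint.h>

	enum { EIR_REG, EMR_REG };			/* stand-in register ids */
	static uint32_t reg_read(int reg);		/* models I915_READ */
	static void reg_write(int reg, uint32_t v);	/* models I915_WRITE */

	static void
	clear_or_mask_errors(void)
	{
		uint32_t eir = reg_read(EIR_REG);

		reg_write(EIR_REG, eir);	/* write-to-clear the reported bits */
		eir = reg_read(EIR_REG);	/* anything that survives is stuck */
		if (eir != 0)
			reg_write(EMR_REG, reg_read(EMR_REG) | eir);	/* mask */
	}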
1442 | /** | 1451 | /** | |
1443 | * i915_handle_error - handle an error interrupt | 1452 | * i915_handle_error - handle an error interrupt | |
1444 | * @dev: drm device | 1453 | * @dev: drm device | |
1445 | * | 1454 | * | |
1446 | * Do some basic checking of register state at error interrupt time and | 1455 | * Do some basic checking of register state at error interrupt time and | |
1447 | * dump it to the syslog. Also call i915_capture_error_state() to make | 1456 | * dump it to the syslog. Also call i915_capture_error_state() to make | |
1448 | * sure we get a record and make it available in debugfs. Fire a uevent | 1457 | * sure we get a record and make it available in debugfs. Fire a uevent | |
1449 | * so userspace knows something bad happened (should trigger collection | 1458 | * so userspace knows something bad happened (should trigger collection | |
1450 | * of a ring dump etc.). | 1459 | * of a ring dump etc.). | |
1451 | */ | 1460 | */ | |
1452 | void i915_handle_error(struct drm_device *dev, bool wedged) | 1461 | void i915_handle_error(struct drm_device *dev, bool wedged) | |
1453 | { | 1462 | { | |
1454 | struct drm_i915_private *dev_priv = dev->dev_private; | 1463 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1455 | struct intel_ring_buffer *ring; | 1464 | struct intel_ring_buffer *ring; | |
1456 | int i; | 1465 | int i; | |
1457 | 1466 | |||
1458 | i915_capture_error_state(dev); | 1467 | i915_capture_error_state(dev); | |
1459 | i915_report_and_clear_eir(dev); | 1468 | i915_report_and_clear_eir(dev); | |
1460 | 1469 | |||
1461 | if (wedged) { | 1470 | if (wedged) { | |
1462 | INIT_COMPLETION(dev_priv->error_completion); | 1471 | INIT_COMPLETION(dev_priv->error_completion); | |
1463 | atomic_set(&dev_priv->mm.wedged, 1); | 1472 | atomic_set(&dev_priv->mm.wedged, 1); | |
1464 | 1473 | |||
1465 | /* | 1474 | /* | |
1466 | * Wake up waiting processes so they don't hang | 1475 | * Wake up waiting processes so they don't hang | |
1467 | */ | 1476 | */ | |
1468 | for_each_ring(ring, dev_priv, i) | 1477 | for_each_ring(ring, dev_priv, i) | |
1478 | #ifdef __NetBSD__ | |||
1479 | DRM_WAKEUP_ALL(&ring->irq_queue, &drm_global_mutex); | |||
1480 | #else | |||
1469 | wake_up_all(&ring->irq_queue); | 1481 | wake_up_all(&ring->irq_queue); | |
1482 | #endif | |||
1470 | } | 1483 | } | |
1471 | 1484 | |||
1472 | queue_work(dev_priv->wq, &dev_priv->error_work); | 1485 | queue_work(dev_priv->wq, &dev_priv->error_work); | |
1473 | } | 1486 | } | |
1474 | 1487 | |||
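The wake-up above is the point of the commit: on NetBSD, ring->irq_queue is now a drm waitqueue whose operations take their interlock (drm_global_mutex here) explicitly, so each Linux wake_up_all() or waitqueue_active() call gains an #ifdef __NetBSD__ twin. A hypothetical pair of wrappers that would centralize the pattern; the diff itself open-codes the ifdefs at every call site:

	static inline void
	i915_ring_wake_all(struct intel_ring_buffer *ring)
	{
	#ifdef __NetBSD__
		DRM_WAKEUP_ALL(&ring->irq_queue, &drm_global_mutex);
	#else
		wake_up_all(&ring->irq_queue);
	#endif
	}

	static inline bool
	i915_ring_has_waiters(struct intel_ring_buffer *ring)
	{
	#ifdef __NetBSD__
		return DRM_WAITERS_P(&ring->irq_queue, &drm_global_mutex);
	#else
		return waitqueue_active(&ring->irq_queue);
	#endif
	}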
1475 | static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | 1488 | static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) | |
1476 | { | 1489 | { | |
1477 | drm_i915_private_t *dev_priv = dev->dev_private; | 1490 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1478 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 1491 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | |
1479 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1492 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
1480 | struct drm_i915_gem_object *obj; | 1493 | struct drm_i915_gem_object *obj; | |
1481 | struct intel_unpin_work *work; | 1494 | struct intel_unpin_work *work; | |
1482 | unsigned long flags; | 1495 | unsigned long flags; | |
1483 | bool stall_detected; | 1496 | bool stall_detected; | |
1484 | 1497 | |||
1485 | /* Ignore early vblank irqs */ | 1498 | /* Ignore early vblank irqs */ | |
1486 | if (intel_crtc == NULL) | 1499 | if (intel_crtc == NULL) | |
1487 | return; | 1500 | return; | |
1488 | 1501 | |||
1489 | spin_lock_irqsave(&dev->event_lock, flags); | 1502 | spin_lock_irqsave(&dev->event_lock, flags); | |
1490 | work = intel_crtc->unpin_work; | 1503 | work = intel_crtc->unpin_work; | |
1491 | 1504 | |||
1492 | if (work == NULL || | 1505 | if (work == NULL || | |
1493 | atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || | 1506 | atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || | |
1494 | !work->enable_stall_check) { | 1507 | !work->enable_stall_check) { | |
1495 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ | 1508 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ | |
1496 | spin_unlock_irqrestore(&dev->event_lock, flags); | 1509 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
1497 | return; | 1510 | return; | |
1498 | } | 1511 | } | |
1499 | 1512 | |||
1500 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | 1513 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | |
1501 | obj = work->pending_flip_obj; | 1514 | obj = work->pending_flip_obj; | |
1502 | if (INTEL_INFO(dev)->gen >= 4) { | 1515 | if (INTEL_INFO(dev)->gen >= 4) { | |
1503 | int dspsurf = DSPSURF(intel_crtc->plane); | 1516 | int dspsurf = DSPSURF(intel_crtc->plane); | |
1504 | stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == | 1517 | stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == | |
1505 | obj->gtt_offset; | 1518 | obj->gtt_offset; | |
1506 | } else { | 1519 | } else { | |
1507 | int dspaddr = DSPADDR(intel_crtc->plane); | 1520 | int dspaddr = DSPADDR(intel_crtc->plane); | |
1508 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + | 1521 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + | |
1509 | crtc->y * crtc->fb->pitches[0] + | 1522 | crtc->y * crtc->fb->pitches[0] + | |
1510 | crtc->x * crtc->fb->bits_per_pixel/8); | 1523 | crtc->x * crtc->fb->bits_per_pixel/8); | |
1511 | } | 1524 | } | |
1512 | 1525 | |||
1513 | spin_unlock_irqrestore(&dev->event_lock, flags); | 1526 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
1514 | 1527 | |||
1515 | if (stall_detected) { | 1528 | if (stall_detected) { | |
1516 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); | 1529 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); | |
1517 | intel_prepare_page_flip(dev, intel_crtc->plane); | 1530 | intel_prepare_page_flip(dev, intel_crtc->plane); | |
1518 | } | 1531 | } | |
1519 | } | 1532 | } | |
1520 | 1533 | |||
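i915_pageflip_stall_check() decides that a flip completed but its interrupt was missed by comparing the live scanout register against where the pending buffer should be scanned out from: gen >= 4 compares the high display base from DSPSURF, older parts compare the full DSPADDR linear address including the x/y panning offset. Reduced to one hypothetical predicate, where pan_bytes stands for crtc->y * pitch + crtc->x * bpp/8:

	/* Hypothetical condensation of the per-generation comparison above. */
	static bool
	flip_landed(int gen, uint32_t scanout_reg, uint32_t gtt_offset,
	    uint32_t pan_bytes)
	{
		if (gen >= 4)
			return I915_HI_DISPBASE(scanout_reg) == gtt_offset;
		return scanout_reg == gtt_offset + pan_bytes;
	}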
1521 | /* Called from drm generic code, passed 'crtc' which | 1534 | /* Called from drm generic code, passed 'crtc' which | |
1522 | * we use as a pipe index | 1535 | * we use as a pipe index | |
1523 | */ | 1536 | */ | |
1524 | static int i915_enable_vblank(struct drm_device *dev, int pipe) | 1537 | static int i915_enable_vblank(struct drm_device *dev, int pipe) | |
1525 | { | 1538 | { | |
1526 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1539 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1527 | unsigned long irqflags; | 1540 | unsigned long irqflags; | |
1528 | 1541 | |||
1529 | if (!i915_pipe_enabled(dev, pipe)) | 1542 | if (!i915_pipe_enabled(dev, pipe)) | |
1530 | return -EINVAL; | 1543 | return -EINVAL; | |
1531 | 1544 | |||
1532 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1545 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1533 | if (INTEL_INFO(dev)->gen >= 4) | 1546 | if (INTEL_INFO(dev)->gen >= 4) | |
1534 | i915_enable_pipestat(dev_priv, pipe, | 1547 | i915_enable_pipestat(dev_priv, pipe, | |
1535 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1548 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
1536 | else | 1549 | else | |
1537 | i915_enable_pipestat(dev_priv, pipe, | 1550 | i915_enable_pipestat(dev_priv, pipe, | |
1538 | PIPE_VBLANK_INTERRUPT_ENABLE); | 1551 | PIPE_VBLANK_INTERRUPT_ENABLE); | |
1539 | 1552 | |||
1540 | /* maintain vblank delivery even in deep C-states */ | 1553 | /* maintain vblank delivery even in deep C-states */ | |
1541 | if (dev_priv->info->gen == 3) | 1554 | if (dev_priv->info->gen == 3) | |
1542 | I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); | 1555 | I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); | |
1543 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1556 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1544 | 1557 | |||
1545 | return 0; | 1558 | return 0; | |
1546 | } | 1559 | } | |
1547 | 1560 | |||
1548 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) | 1561 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) | |
1549 | { | 1562 | { | |
1550 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1563 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1551 | unsigned long irqflags; | 1564 | unsigned long irqflags; | |
1552 | 1565 | |||
1553 | if (!i915_pipe_enabled(dev, pipe)) | 1566 | if (!i915_pipe_enabled(dev, pipe)) | |
1554 | return -EINVAL; | 1567 | return -EINVAL; | |
1555 | 1568 | |||
1556 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1569 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1557 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | 1570 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | |
1558 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); | 1571 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); | |
1559 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1572 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1560 | 1573 | |||
1561 | return 0; | 1574 | return 0; | |
1562 | } | 1575 | } | |
1563 | 1576 | |||
1564 | static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) | 1577 | static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) | |
1565 | { | 1578 | { | |
1566 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1579 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1567 | unsigned long irqflags; | 1580 | unsigned long irqflags; | |
1568 | 1581 | |||
1569 | if (!i915_pipe_enabled(dev, pipe)) | 1582 | if (!i915_pipe_enabled(dev, pipe)) | |
1570 | return -EINVAL; | 1583 | return -EINVAL; | |
1571 | 1584 | |||
1572 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1585 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1573 | ironlake_enable_display_irq(dev_priv, | 1586 | ironlake_enable_display_irq(dev_priv, | |
1574 | DE_PIPEA_VBLANK_IVB << (5 * pipe)); | 1587 | DE_PIPEA_VBLANK_IVB << (5 * pipe)); | |
1575 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1588 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1576 | 1589 | |||
1577 | return 0; | 1590 | return 0; | |
1578 | } | 1591 | } | |
1579 | 1592 | |||
1580 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) | 1593 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) | |
1581 | { | 1594 | { | |
1582 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1595 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1583 | unsigned long irqflags; | 1596 | unsigned long irqflags; | |
1584 | u32 imr; | 1597 | u32 imr; | |
1585 | 1598 | |||
1586 | if (!i915_pipe_enabled(dev, pipe)) | 1599 | if (!i915_pipe_enabled(dev, pipe)) | |
1587 | return -EINVAL; | 1600 | return -EINVAL; | |
1588 | 1601 | |||
1589 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1602 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1590 | imr = I915_READ(VLV_IMR); | 1603 | imr = I915_READ(VLV_IMR); | |
1591 | if (pipe == 0) | 1604 | if (pipe == 0) | |
1592 | imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; | 1605 | imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; | |
1593 | else | 1606 | else | |
1594 | imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | 1607 | imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | |
1595 | I915_WRITE(VLV_IMR, imr); | 1608 | I915_WRITE(VLV_IMR, imr); | |
1596 | i915_enable_pipestat(dev_priv, pipe, | 1609 | i915_enable_pipestat(dev_priv, pipe, | |
1597 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1610 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
1598 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1611 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1599 | 1612 | |||
1600 | return 0; | 1613 | return 0; | |
1601 | } | 1614 | } | |
1602 | 1615 | |||
1603 | /* Called from drm generic code, passed 'crtc' which | 1616 | /* Called from drm generic code, passed 'crtc' which | |
1604 | * we use as a pipe index | 1617 | * we use as a pipe index | |
1605 | */ | 1618 | */ | |
1606 | static void i915_disable_vblank(struct drm_device *dev, int pipe) | 1619 | static void i915_disable_vblank(struct drm_device *dev, int pipe) | |
1607 | { | 1620 | { | |
1608 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1621 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1609 | unsigned long irqflags; | 1622 | unsigned long irqflags; | |
1610 | 1623 | |||
1611 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1624 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1612 | if (dev_priv->info->gen == 3) | 1625 | if (dev_priv->info->gen == 3) | |
1613 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); | 1626 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); | |
1614 | 1627 | |||
1615 | i915_disable_pipestat(dev_priv, pipe, | 1628 | i915_disable_pipestat(dev_priv, pipe, | |
1616 | PIPE_VBLANK_INTERRUPT_ENABLE | | 1629 | PIPE_VBLANK_INTERRUPT_ENABLE | | |
1617 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1630 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
1618 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1631 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1619 | } | 1632 | } | |
1620 | 1633 | |||
1621 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) | 1634 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) | |
1622 | { | 1635 | { | |
1623 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1636 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1624 | unsigned long irqflags; | 1637 | unsigned long irqflags; | |
1625 | 1638 | |||
1626 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1639 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1627 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | 1640 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | |
1628 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); | 1641 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); | |
1629 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1642 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1630 | } | 1643 | } | |
1631 | 1644 | |||
1632 | static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) | 1645 | static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) | |
1633 | { | 1646 | { | |
1634 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1647 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1635 | unsigned long irqflags; | 1648 | unsigned long irqflags; | |
1636 | 1649 | |||
1637 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1650 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1638 | ironlake_disable_display_irq(dev_priv, | 1651 | ironlake_disable_display_irq(dev_priv, | |
1639 | DE_PIPEA_VBLANK_IVB << (pipe * 5)); | 1652 | DE_PIPEA_VBLANK_IVB << (pipe * 5)); | |
1640 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1653 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1641 | } | 1654 | } | |
1642 | 1655 | |||
1643 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) | 1656 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) | |
1644 | { | 1657 | { | |
1645 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1658 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1646 | unsigned long irqflags; | 1659 | unsigned long irqflags; | |
1647 | u32 imr; | 1660 | u32 imr; | |
1648 | 1661 | |||
1649 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1662 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1650 | i915_disable_pipestat(dev_priv, pipe, | 1663 | i915_disable_pipestat(dev_priv, pipe, | |
1651 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 1664 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
1652 | imr = I915_READ(VLV_IMR); | 1665 | imr = I915_READ(VLV_IMR); | |
1653 | if (pipe == 0) | 1666 | if (pipe == 0) | |
1654 | imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; | 1667 | imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; | |
1655 | else | 1668 | else | |
1656 | imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | 1669 | imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | |
1657 | I915_WRITE(VLV_IMR, imr); | 1670 | I915_WRITE(VLV_IMR, imr); | |
1658 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1671 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1659 | } | 1672 | } | |
1660 | 1673 | |||
1661 | static u32 | 1674 | static u32 | |
1662 | ring_last_seqno(struct intel_ring_buffer *ring) | 1675 | ring_last_seqno(struct intel_ring_buffer *ring) | |
1663 | { | 1676 | { | |
1664 | return list_entry(ring->request_list.prev, | 1677 | return list_entry(ring->request_list.prev, | |
1665 | struct drm_i915_gem_request, list)->seqno; | 1678 | struct drm_i915_gem_request, list)->seqno; | |
1666 | } | 1679 | } | |
1667 | 1680 | |||
1668 | static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) | 1681 | static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) | |
1669 | { | 1682 | { | |
1670 | if (list_empty(&ring->request_list) || | 1683 | if (list_empty(&ring->request_list) || | |
1671 | i915_seqno_passed(ring->get_seqno(ring, false), | 1684 | i915_seqno_passed(ring->get_seqno(ring, false), | |
1672 | ring_last_seqno(ring))) { | 1685 | ring_last_seqno(ring))) { | |
1673 | /* Issue a wake-up to catch stuck h/w. */ | 1686 | /* Issue a wake-up to catch stuck h/w. */ | |
1687 | #ifdef __NetBSD__ | |||
1688 | if (DRM_WAITERS_P(&ring->irq_queue, &drm_global_mutex)) { | |||
1689 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | |||
1690 | ring->name); | |||
1691 | DRM_WAKEUP_ALL(&ring->irq_queue, &drm_global_mutex); | |||
1692 | *err = true; | |||
1693 | } | |||
1694 | #else | |||
1674 | if (waitqueue_active(&ring->irq_queue)) { | 1695 | if (waitqueue_active(&ring->irq_queue)) { | |
1675 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | 1696 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | |
1676 | ring->name); | 1697 | ring->name); | |
1677 | wake_up_all(&ring->irq_queue); | 1698 | wake_up_all(&ring->irq_queue); | |
1678 | *err = true; | 1699 | *err = true; | |
1679 | } | 1700 | } | |
1701 | #endif | |||
1680 | return true; | 1702 | return true; | |
1681 | } | 1703 | } | |
1682 | return false; | 1704 | return false; | |
1683 | } | 1705 | } | |
1684 | 1706 | |||
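i915_hangcheck_ring_idle() calls a ring idle when its request list is empty or when the hardware's reported seqno has passed the newest queued request. In this era of the driver, i915_seqno_passed() is a wrap-safe signed comparison of 32-bit seqnos; a condensed, self-contained model with illustrative names:

	#include <stdbool.h>
	#include <stdint.h>

	/* Wrap-safe "a has reached b" for monotonically increasing 32-bit seqnos. */
	static bool
	seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}

	static bool
	ring_is_idle(bool no_requests, uint32_t hw_seqno, uint32_t newest_seqno)
	{
		return no_requests || seqno_passed(hw_seqno, newest_seqno);
	}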
1685 | static bool kick_ring(struct intel_ring_buffer *ring) | 1707 | static bool kick_ring(struct intel_ring_buffer *ring) | |
1686 | { | 1708 | { | |
1687 | struct drm_device *dev = ring->dev; | 1709 | struct drm_device *dev = ring->dev; | |
1688 | struct drm_i915_private *dev_priv = dev->dev_private; | 1710 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1689 | u32 tmp = I915_READ_CTL(ring); | 1711 | u32 tmp = I915_READ_CTL(ring); | |
1690 | if (tmp & RING_WAIT) { | 1712 | if (tmp & RING_WAIT) { | |
1691 | DRM_ERROR("Kicking stuck wait on %s\n", | 1713 | DRM_ERROR("Kicking stuck wait on %s\n", | |
1692 | ring->name); | 1714 | ring->name); | |
1693 | I915_WRITE_CTL(ring, tmp); | 1715 | I915_WRITE_CTL(ring, tmp); | |
1694 | return true; | 1716 | return true; | |
1695 | } | 1717 | } | |
1696 | return false; | 1718 | return false; | |
1697 | } | 1719 | } | |
1698 | 1720 | |||
1699 | static bool i915_hangcheck_hung(struct drm_device *dev) | 1721 | static bool i915_hangcheck_hung(struct drm_device *dev) | |
1700 | { | 1722 | { | |
1701 | drm_i915_private_t *dev_priv = dev->dev_private; | 1723 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1702 | 1724 | |||
1703 | if (dev_priv->hangcheck_count++ > 1) { | 1725 | if (dev_priv->hangcheck_count++ > 1) { | |
1704 | bool hung = true; | 1726 | bool hung = true; | |
1705 | 1727 | |||
1706 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | 1728 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); | |
1707 | i915_handle_error(dev, true); | 1729 | i915_handle_error(dev, true); | |
1708 | 1730 | |||
1709 | if (!IS_GEN2(dev)) { | 1731 | if (!IS_GEN2(dev)) { | |
1710 | struct intel_ring_buffer *ring; | 1732 | struct intel_ring_buffer *ring; | |
1711 | int i; | 1733 | int i; | |
1712 | 1734 | |||
1713 | /* Is the chip hanging on a WAIT_FOR_EVENT? | 1735 | /* Is the chip hanging on a WAIT_FOR_EVENT? | |
1714 | * If so we can simply poke the RB_WAIT bit | 1736 | * If so we can simply poke the RB_WAIT bit | |
1715 | * and break the hang. This should work on | 1737 | * and break the hang. This should work on | |
1716 | * all but the second generation chipsets. | 1738 | * all but the second generation chipsets. | |
1717 | */ | 1739 | */ | |
1718 | for_each_ring(ring, dev_priv, i) | 1740 | for_each_ring(ring, dev_priv, i) | |
1719 | hung &= !kick_ring(ring); | 1741 | hung &= !kick_ring(ring); | |
1720 | } | 1742 | } | |
1721 | 1743 | |||
1722 | return hung; | 1744 | return hung; | |
1723 | } | 1745 | } | |
1724 | 1746 | |||
1725 | return false; | 1747 | return false; | |
1726 | } | 1748 | } | |
1727 | 1749 | |||
1728 | /** | 1750 | /** | |
1729 | * This is called when the chip hasn't reported back with completed | 1751 | * This is called when the chip hasn't reported back with completed | |
1730 | * batchbuffers in a long time. The first time this is called we simply record | 1752 | * batchbuffers in a long time. The first time this is called we simply record | |
1731 | * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses | 1753 | * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses | |
1732 | * again, we assume the chip is wedged and try to fix it. | 1754 | * again, we assume the chip is wedged and try to fix it. | |
1733 | */ | 1755 | */ | |
1734 | void i915_hangcheck_elapsed(unsigned long data) | 1756 | void i915_hangcheck_elapsed(unsigned long data) | |
1735 | { | 1757 | { | |
1736 | struct drm_device *dev = (struct drm_device *)data; | 1758 | struct drm_device *dev = (struct drm_device *)data; | |
1737 | drm_i915_private_t *dev_priv = dev->dev_private; | 1759 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1738 | uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; | 1760 | uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; | |
1739 | struct intel_ring_buffer *ring; | 1761 | struct intel_ring_buffer *ring; | |
1740 | bool err = false, idle; | 1762 | bool err = false, idle; | |
1741 | int i; | 1763 | int i; | |
1742 | 1764 | |||
1743 | if (!i915_enable_hangcheck) | 1765 | if (!i915_enable_hangcheck) | |
1744 | return; | 1766 | return; | |
1745 | 1767 | |||
1746 | memset(acthd, 0, sizeof(acthd)); | 1768 | memset(acthd, 0, sizeof(acthd)); | |
1747 | idle = true; | 1769 | idle = true; | |
1748 | for_each_ring(ring, dev_priv, i) { | 1770 | for_each_ring(ring, dev_priv, i) { | |
1749 | idle &= i915_hangcheck_ring_idle(ring, &err); | 1771 | idle &= i915_hangcheck_ring_idle(ring, &err); | |
1750 | acthd[i] = intel_ring_get_active_head(ring); | 1772 | acthd[i] = intel_ring_get_active_head(ring); | |
1751 | } | 1773 | } | |
1752 | 1774 | |||
1753 | /* If all work is done then ACTHD clearly hasn't advanced. */ | 1775 | /* If all work is done then ACTHD clearly hasn't advanced. */ | |
1754 | if (idle) { | 1776 | if (idle) { | |
1755 | if (err) { | 1777 | if (err) { | |
1756 | if (i915_hangcheck_hung(dev)) | 1778 | if (i915_hangcheck_hung(dev)) | |
1757 | return; | 1779 | return; | |
1758 | 1780 | |||
1759 | goto repeat; | 1781 | goto repeat; | |
1760 | } | 1782 | } | |
1761 | 1783 | |||
1762 | dev_priv->hangcheck_count = 0; | 1784 | dev_priv->hangcheck_count = 0; | |
1763 | return; | 1785 | return; | |
1764 | } | 1786 | } | |
1765 | 1787 | |||
1766 | i915_get_extra_instdone(dev, instdone); | 1788 | i915_get_extra_instdone(dev, instdone); | |
1767 | if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && | 1789 | if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && | |
1768 | memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { | 1790 | memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { | |
1769 | if (i915_hangcheck_hung(dev)) | 1791 | if (i915_hangcheck_hung(dev)) | |
1770 | return; | 1792 | return; | |
1771 | } else { | 1793 | } else { | |
1772 | dev_priv->hangcheck_count = 0; | 1794 | dev_priv->hangcheck_count = 0; | |
1773 | 1795 | |||
1774 | memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); | 1796 | memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); | |
1775 | memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); | 1797 | memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); | |
1776 | } | 1798 | } | |
1777 | 1799 | |||
1778 | repeat: | 1800 | repeat: | |
1779 | /* Reset timer in case the chip hangs without another request being added */ | 1801 | /* Reset timer in case the chip hangs without another request being added */ | |
1780 | mod_timer(&dev_priv->hangcheck_timer, | 1802 | mod_timer(&dev_priv->hangcheck_timer, | |
1781 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | 1803 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | |
1782 | } | 1804 | } | |
1783 | 1805 | |||
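i915_hangcheck_elapsed() is a no-progress watchdog: each run snapshots ACTHD and INSTDONE, resets the strike count whenever the snapshot changes, and lets i915_hangcheck_hung() declare a hang only after more than one consecutive stuck tick. The skeleton as a self-contained model, names illustrative:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct watchdog {
		uint32_t snap[8];	/* last ACTHD/INSTDONE snapshot */
		int strikes;		/* consecutive ticks without progress */
	};

	/* One timer tick: returns true once the engine is stuck a second time. */
	static bool
	watchdog_tick(struct watchdog *w, const uint32_t now[8])
	{
		if (memcmp(w->snap, now, sizeof(w->snap)) == 0)
			return ++w->strikes > 1;
		memcpy(w->snap, now, sizeof(w->snap));
		w->strikes = 0;
		return false;
	}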
1784 | /* drm_dma.h hooks | 1806 | /* drm_dma.h hooks | |
1785 | */ | 1807 | */ | |
1786 | static void ironlake_irq_preinstall(struct drm_device *dev) | 1808 | static void ironlake_irq_preinstall(struct drm_device *dev) | |
1787 | { | 1809 | { | |
1788 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1810 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1789 | 1811 | |||
1790 | atomic_set(&dev_priv->irq_received, 0); | 1812 | atomic_set(&dev_priv->irq_received, 0); | |
1791 | 1813 | |||
1792 | I915_WRITE(HWSTAM, 0xeffe); | 1814 | I915_WRITE(HWSTAM, 0xeffe); | |
1793 | 1815 | |||
1794 | /* XXX hotplug from PCH */ | 1816 | /* XXX hotplug from PCH */ | |
1795 | 1817 | |||
1796 | I915_WRITE(DEIMR, 0xffffffff); | 1818 | I915_WRITE(DEIMR, 0xffffffff); | |
1797 | I915_WRITE(DEIER, 0x0); | 1819 | I915_WRITE(DEIER, 0x0); | |
1798 | POSTING_READ(DEIER); | 1820 | POSTING_READ(DEIER); | |
1799 | 1821 | |||
1800 | /* and GT */ | 1822 | /* and GT */ | |
1801 | I915_WRITE(GTIMR, 0xffffffff); | 1823 | I915_WRITE(GTIMR, 0xffffffff); | |
1802 | I915_WRITE(GTIER, 0x0); | 1824 | I915_WRITE(GTIER, 0x0); | |
1803 | POSTING_READ(GTIER); | 1825 | POSTING_READ(GTIER); | |
1804 | 1826 | |||
1805 | /* south display irq */ | 1827 | /* south display irq */ | |
1806 | I915_WRITE(SDEIMR, 0xffffffff); | 1828 | I915_WRITE(SDEIMR, 0xffffffff); | |
1807 | I915_WRITE(SDEIER, 0x0); | 1829 | I915_WRITE(SDEIER, 0x0); | |
1808 | POSTING_READ(SDEIER); | 1830 | POSTING_READ(SDEIER); | |
1809 | } | 1831 | } | |
1810 | 1832 | |||
1811 | static void valleyview_irq_preinstall(struct drm_device *dev) | 1833 | static void valleyview_irq_preinstall(struct drm_device *dev) | |
1812 | { | 1834 | { | |
1813 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1835 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1814 | int pipe; | 1836 | int pipe; | |
1815 | 1837 | |||
1816 | atomic_set(&dev_priv->irq_received, 0); | 1838 | atomic_set(&dev_priv->irq_received, 0); | |
1817 | 1839 | |||
1818 | /* VLV magic */ | 1840 | /* VLV magic */ | |
1819 | I915_WRITE(VLV_IMR, 0); | 1841 | I915_WRITE(VLV_IMR, 0); | |
1820 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); | 1842 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); | |
1821 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); | 1843 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); | |
1822 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); | 1844 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); | |
1823 | 1845 | |||
1824 | /* and GT */ | 1846 | /* and GT */ | |
1825 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1847 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
1826 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1848 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
1827 | I915_WRITE(GTIMR, 0xffffffff); | 1849 | I915_WRITE(GTIMR, 0xffffffff); | |
1828 | I915_WRITE(GTIER, 0x0); | 1850 | I915_WRITE(GTIER, 0x0); | |
1829 | POSTING_READ(GTIER); | 1851 | POSTING_READ(GTIER); | |
1830 | 1852 | |||
1831 | I915_WRITE(DPINVGTT, 0xff); | 1853 | I915_WRITE(DPINVGTT, 0xff); | |
1832 | 1854 | |||
1833 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 1855 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
1834 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 1856 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
1835 | for_each_pipe(pipe) | 1857 | for_each_pipe(pipe) | |
1836 | I915_WRITE(PIPESTAT(pipe), 0xffff); | 1858 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
1837 | I915_WRITE(VLV_IIR, 0xffffffff); | 1859 | I915_WRITE(VLV_IIR, 0xffffffff); | |
1838 | I915_WRITE(VLV_IMR, 0xffffffff); | 1860 | I915_WRITE(VLV_IMR, 0xffffffff); | |
1839 | I915_WRITE(VLV_IER, 0x0); | 1861 | I915_WRITE(VLV_IER, 0x0); | |
1840 | POSTING_READ(VLV_IER); | 1862 | POSTING_READ(VLV_IER); | |
1841 | } | 1863 | } | |
1842 | 1864 | |||
1843 | /* | 1865 | /* | |
1844 | * Enable digital hotplug on the PCH, and configure the DP short pulse | 1866 | * Enable digital hotplug on the PCH, and configure the DP short pulse | |
1845 | * duration to 2ms (which is the minimum in the Display Port spec) | 1867 | * duration to 2ms (which is the minimum in the Display Port spec) | |
1846 | * | 1868 | * | |
1847 | * This register is the same on all known PCH chips. | 1869 | * This register is the same on all known PCH chips. | |
1848 | */ | 1870 | */ | |
1849 | 1871 | |||
1850 | static void ironlake_enable_pch_hotplug(struct drm_device *dev) | 1872 | static void ironlake_enable_pch_hotplug(struct drm_device *dev) | |
1851 | { | 1873 | { | |
1852 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1874 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1853 | u32 hotplug; | 1875 | u32 hotplug; | |
1854 | 1876 | |||
1855 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | 1877 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | |
1856 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | 1878 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | |
1857 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | 1879 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | |
1858 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | 1880 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | |
1859 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | 1881 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | |
1860 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | 1882 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | |
1861 | } | 1883 | } | |
1862 | 1884 | |||
1863 | static int ironlake_irq_postinstall(struct drm_device *dev) | 1885 | static int ironlake_irq_postinstall(struct drm_device *dev) | |
1864 | { | 1886 | { | |
1865 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1887 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1866 | /* enable the kinds of interrupts that are always enabled */ | 1888 | /* enable the kinds of interrupts that are always enabled */ | |
1867 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1889 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | |
1868 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1890 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | |
1869 | u32 render_irqs; | 1891 | u32 render_irqs; | |
1870 | u32 hotplug_mask; | 1892 | u32 hotplug_mask; | |
1871 | 1893 | |||
1872 | dev_priv->irq_mask = ~display_mask; | 1894 | dev_priv->irq_mask = ~display_mask; | |
1873 | 1895 | |||
1874 | /* should always be able to generate an irq */ | 1896 | /* should always be able to generate an irq */ | |
1875 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1897 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
1876 | I915_WRITE(DEIMR, dev_priv->irq_mask); | 1898 | I915_WRITE(DEIMR, dev_priv->irq_mask); | |
1877 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); | 1899 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); | |
1878 | POSTING_READ(DEIER); | 1900 | POSTING_READ(DEIER); | |
1879 | 1901 | |||
1880 | dev_priv->gt_irq_mask = ~0; | 1902 | dev_priv->gt_irq_mask = ~0; | |
1881 | 1903 | |||
1882 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1904 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
1883 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 1905 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
1884 | 1906 | |||
1885 | if (IS_GEN6(dev)) | 1907 | if (IS_GEN6(dev)) | |
1886 | render_irqs = | 1908 | render_irqs = | |
1887 | GT_USER_INTERRUPT | | 1909 | GT_USER_INTERRUPT | | |
1888 | GEN6_BSD_USER_INTERRUPT | | 1910 | GEN6_BSD_USER_INTERRUPT | | |
1889 | GEN6_BLITTER_USER_INTERRUPT; | 1911 | GEN6_BLITTER_USER_INTERRUPT; | |
1890 | else | 1912 | else | |
1891 | render_irqs = | 1913 | render_irqs = | |
1892 | GT_USER_INTERRUPT | | 1914 | GT_USER_INTERRUPT | | |
1893 | GT_PIPE_NOTIFY | | 1915 | GT_PIPE_NOTIFY | | |
1894 | GT_BSD_USER_INTERRUPT; | 1916 | GT_BSD_USER_INTERRUPT; | |
1895 | I915_WRITE(GTIER, render_irqs); | 1917 | I915_WRITE(GTIER, render_irqs); | |
1896 | POSTING_READ(GTIER); | 1918 | POSTING_READ(GTIER); | |
1897 | 1919 | |||
1898 | if (HAS_PCH_CPT(dev)) { | 1920 | if (HAS_PCH_CPT(dev)) { | |
1899 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | | 1921 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | | |
1900 | SDE_PORTB_HOTPLUG_CPT | | 1922 | SDE_PORTB_HOTPLUG_CPT | | |
1901 | SDE_PORTC_HOTPLUG_CPT | | 1923 | SDE_PORTC_HOTPLUG_CPT | | |
1902 | SDE_PORTD_HOTPLUG_CPT); | 1924 | SDE_PORTD_HOTPLUG_CPT); | |
1903 | } else { | 1925 | } else { | |
1904 | hotplug_mask = (SDE_CRT_HOTPLUG | | 1926 | hotplug_mask = (SDE_CRT_HOTPLUG | | |
1905 | SDE_PORTB_HOTPLUG | | 1927 | SDE_PORTB_HOTPLUG | | |
1906 | SDE_PORTC_HOTPLUG | | 1928 | SDE_PORTC_HOTPLUG | | |
1907 | SDE_PORTD_HOTPLUG | | 1929 | SDE_PORTD_HOTPLUG | | |
1908 | SDE_AUX_MASK); | 1930 | SDE_AUX_MASK); | |
1909 | } | 1931 | } | |
1910 | 1932 | |||
1911 | dev_priv->pch_irq_mask = ~hotplug_mask; | 1933 | dev_priv->pch_irq_mask = ~hotplug_mask; | |
1912 | 1934 | |||
1913 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 1935 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | |
1914 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); | 1936 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); | |
1915 | I915_WRITE(SDEIER, hotplug_mask); | 1937 | I915_WRITE(SDEIER, hotplug_mask); | |
1916 | POSTING_READ(SDEIER); | 1938 | POSTING_READ(SDEIER); | |
1917 | 1939 | |||
1918 | ironlake_enable_pch_hotplug(dev); | 1940 | ironlake_enable_pch_hotplug(dev); | |
1919 | 1941 | |||
1920 | if (IS_IRONLAKE_M(dev)) { | 1942 | if (IS_IRONLAKE_M(dev)) { | |
1921 | /* Clear & enable PCU event interrupts */ | 1943 | /* Clear & enable PCU event interrupts */ | |
1922 | I915_WRITE(DEIIR, DE_PCU_EVENT); | 1944 | I915_WRITE(DEIIR, DE_PCU_EVENT); | |
1923 | I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); | 1945 | I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); | |
1924 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); | 1946 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); | |
1925 | } | 1947 | } | |
1926 | 1948 | |||
1927 | return 0; | 1949 | return 0; | |
1928 | } | 1950 | } | |
1929 | 1951 | |||
1930 | static int ivybridge_irq_postinstall(struct drm_device *dev) | 1952 | static int ivybridge_irq_postinstall(struct drm_device *dev) | |
1931 | { | 1953 | { | |
1932 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1954 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1933 | /* enable the kinds of interrupts that are always enabled */ | 1955 | /* enable the kinds of interrupts that are always enabled */ | |
1934 | u32 display_mask = | 1956 | u32 display_mask = | |
1935 | DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | | 1957 | DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | | |
1936 | DE_PLANEC_FLIP_DONE_IVB | | 1958 | DE_PLANEC_FLIP_DONE_IVB | | |
1937 | DE_PLANEB_FLIP_DONE_IVB | | 1959 | DE_PLANEB_FLIP_DONE_IVB | | |
1938 | DE_PLANEA_FLIP_DONE_IVB; | 1960 | DE_PLANEA_FLIP_DONE_IVB; | |
1939 | u32 render_irqs; | 1961 | u32 render_irqs; | |
1940 | u32 hotplug_mask; | 1962 | u32 hotplug_mask; | |
1941 | 1963 | |||
1942 | dev_priv->irq_mask = ~display_mask; | 1964 | dev_priv->irq_mask = ~display_mask; | |
1943 | 1965 | |||
1944 | /* should always be able to generate an irq */ | 1966 | /* should always be able to generate an irq */ | |
1945 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1967 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
1946 | I915_WRITE(DEIMR, dev_priv->irq_mask); | 1968 | I915_WRITE(DEIMR, dev_priv->irq_mask); | |
1947 | I915_WRITE(DEIER, | 1969 | I915_WRITE(DEIER, | |
1948 | display_mask | | 1970 | display_mask | | |
1949 | DE_PIPEC_VBLANK_IVB | | 1971 | DE_PIPEC_VBLANK_IVB | | |
1950 | DE_PIPEB_VBLANK_IVB | | 1972 | DE_PIPEB_VBLANK_IVB | | |
1951 | DE_PIPEA_VBLANK_IVB); | 1973 | DE_PIPEA_VBLANK_IVB); | |
1952 | POSTING_READ(DEIER); | 1974 | POSTING_READ(DEIER); | |
1953 | 1975 | |||
1954 | dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | 1976 | dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | |
1955 | 1977 | |||
1956 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1978 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
1957 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 1979 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
1958 | 1980 | |||
1959 | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | | 1981 | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | | |
1960 | GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | 1982 | GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; | |
1961 | I915_WRITE(GTIER, render_irqs); | 1983 | I915_WRITE(GTIER, render_irqs); | |
1962 | POSTING_READ(GTIER); | 1984 | POSTING_READ(GTIER); | |
1963 | 1985 | |||
1964 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | | 1986 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | | |
1965 | SDE_PORTB_HOTPLUG_CPT | | 1987 | SDE_PORTB_HOTPLUG_CPT | | |
1966 | SDE_PORTC_HOTPLUG_CPT | | 1988 | SDE_PORTC_HOTPLUG_CPT | | |
1967 | SDE_PORTD_HOTPLUG_CPT); | 1989 | SDE_PORTD_HOTPLUG_CPT); | |
1968 | dev_priv->pch_irq_mask = ~hotplug_mask; | 1990 | dev_priv->pch_irq_mask = ~hotplug_mask; | |
1969 | 1991 | |||
1970 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 1992 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | |
1971 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); | 1993 | I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); | |
1972 | I915_WRITE(SDEIER, hotplug_mask); | 1994 | I915_WRITE(SDEIER, hotplug_mask); | |
1973 | POSTING_READ(SDEIER); | 1995 | POSTING_READ(SDEIER); | |
1974 | 1996 | |||
1975 | ironlake_enable_pch_hotplug(dev); | 1997 | ironlake_enable_pch_hotplug(dev); | |
1976 | 1998 | |||
1977 | return 0; | 1999 | return 0; | |
1978 | } | 2000 | } | |
1979 | 2001 | |||
1980 | static int valleyview_irq_postinstall(struct drm_device *dev) | 2002 | static int valleyview_irq_postinstall(struct drm_device *dev) | |
1981 | { | 2003 | { | |
1982 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2004 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1983 | u32 enable_mask; | 2005 | u32 enable_mask; | |
1984 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 2006 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | |
1985 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; | 2007 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; | |
1986 | u32 render_irqs; | 2008 | u32 render_irqs; | |
1987 | u16 msid; | 2009 | u16 msid; | |
1988 | 2010 | |||
1989 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; | 2011 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; | |
1990 | enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 2012 | enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
1991 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | 2013 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | |
1992 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 2014 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
1993 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | 2015 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | |
1994 | 2016 | |||
1995 | /* | 2017 | /* | |
1996 | * Leave vblank interrupts masked initially. enable/disable will | 2018 | * Leave vblank interrupts masked initially. enable/disable will | |
1997 | * toggle them based on usage. | 2019 | * toggle them based on usage. | |
1998 | */ | 2020 | */ | |
1999 | dev_priv->irq_mask = (~enable_mask) | | 2021 | dev_priv->irq_mask = (~enable_mask) | | |
2000 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | 2022 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | |
2001 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | 2023 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | |
2002 | 2024 | |||
2003 | dev_priv->pipestat[0] = 0; | 2025 | dev_priv->pipestat[0] = 0; | |
2004 | dev_priv->pipestat[1] = 0; | 2026 | dev_priv->pipestat[1] = 0; | |
2005 | 2027 | |||
2006 | /* Hack for broken MSIs on VLV */ | 2028 | /* Hack for broken MSIs on VLV */ | |
2007 | pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); | 2029 | pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); | |
2008 | pci_read_config_word(dev->pdev, 0x98, &msid); | 2030 | pci_read_config_word(dev->pdev, 0x98, &msid); | |
2009 | msid &= 0xff; /* mask out delivery bits */ | 2031 | msid &= 0xff; /* mask out delivery bits */ | |
2010 | msid |= (1<<14); | 2032 | msid |= (1<<14); | |
2011 | pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); | 2033 | pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); | |
2012 | 2034 | |||
2013 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | 2035 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | |
2014 | I915_WRITE(VLV_IER, enable_mask); | 2036 | I915_WRITE(VLV_IER, enable_mask); | |
2015 | I915_WRITE(VLV_IIR, 0xffffffff); | 2037 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2016 | I915_WRITE(PIPESTAT(0), 0xffff); | 2038 | I915_WRITE(PIPESTAT(0), 0xffff); | |
2017 | I915_WRITE(PIPESTAT(1), 0xffff); | 2039 | I915_WRITE(PIPESTAT(1), 0xffff); | |
2018 | POSTING_READ(VLV_IER); | 2040 | POSTING_READ(VLV_IER); | |
2019 | 2041 | |||
2020 | i915_enable_pipestat(dev_priv, 0, pipestat_enable); | 2042 | i915_enable_pipestat(dev_priv, 0, pipestat_enable); | |
2021 | i915_enable_pipestat(dev_priv, 1, pipestat_enable); | 2043 | i915_enable_pipestat(dev_priv, 1, pipestat_enable); | |
2022 | 2044 | |||
2023 | I915_WRITE(VLV_IIR, 0xffffffff); | 2045 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2024 | I915_WRITE(VLV_IIR, 0xffffffff); | 2046 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2025 | 2047 | |||
2026 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2048 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2027 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 2049 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
2028 | 2050 | |||
2029 | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | | 2051 | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | | |
2030 | GEN6_BLITTER_USER_INTERRUPT; | 2052 | GEN6_BLITTER_USER_INTERRUPT; | |
2031 | I915_WRITE(GTIER, render_irqs); | 2053 | I915_WRITE(GTIER, render_irqs); | |
2032 | POSTING_READ(GTIER); | 2054 | POSTING_READ(GTIER); | |
2033 | 2055 | |||
2034 | /* ack & enable invalid PTE error interrupts */ | 2056 | /* ack & enable invalid PTE error interrupts */ | |
2035 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | 2057 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | |
2036 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | 2058 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | |
2037 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); | 2059 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); | |
2038 | #endif | 2060 | #endif | |
2039 | 2061 | |||
2040 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | 2062 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | |
2041 | /* Note HDMI and DP share bits */ | 2063 | /* Note HDMI and DP share bits */ | |
2042 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | 2064 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | |
2043 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | 2065 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | |
2044 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | 2066 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | |
2045 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | 2067 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | |
2046 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | 2068 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | |
2047 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | 2069 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | |
2048 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) | 2070 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) | |
2049 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | 2071 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | |
2050 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) | 2072 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) | |
2051 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | 2073 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | |
2052 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | 2074 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | |
2053 | hotplug_en |= CRT_HOTPLUG_INT_EN; | 2075 | hotplug_en |= CRT_HOTPLUG_INT_EN; | |
2054 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | 2076 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | |
2055 | } | 2077 | } | |
2056 | 2078 | |||
2057 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 2079 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | |
2058 | 2080 | |||
2059 | return 0; | 2081 | return 0; | |
2060 | } | 2082 | } | |
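
valleyview_irq_postinstall leaves the vblank bits enabled in VLV_IER but set in VLV_IMR, so the enable/disable hooks can later toggle a source cheaply through the mask register alone. Below is a toy model of that two-register relationship, under the simplified assumption that IMR gates latching into IIR and IER gates the interrupt line; all names and bit values are illustrative, not the real register definitions.

#include <stdint.h>
#include <stdio.h>

enum { IIR, IMR, IER, NREGS };
static uint32_t mmio[NREGS];

#define VBLANK_BIT  (1u << 7)    /* illustrative bit positions */
#define HOTPLUG_BIT (1u << 17)

static void raise_event(uint32_t bit)
{
	if (!(mmio[IMR] & bit))      /* masked sources never latch */
		mmio[IIR] |= bit;
}

static int line_asserted(void)
{
	return (mmio[IIR] & mmio[IER]) != 0;
}

int main(void)
{
	uint32_t enable_mask = VBLANK_BIT | HOTPLUG_BIT;

	mmio[IER] = enable_mask;
	mmio[IMR] = ~enable_mask | VBLANK_BIT;   /* vblank stays masked */

	raise_event(VBLANK_BIT);
	printf("masked vblank asserts line: %d\n", line_asserted());    /* 0 */

	mmio[IMR] &= ~VBLANK_BIT;    /* what the enable_vblank hook does */
	raise_event(VBLANK_BIT);
	printf("unmasked vblank asserts line: %d\n", line_asserted());  /* 1 */
	return 0;
}
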
2061 | 2083 | |||
2062 | static void valleyview_irq_uninstall(struct drm_device *dev) | 2084 | static void valleyview_irq_uninstall(struct drm_device *dev) | |
2063 | { | 2085 | { | |
2064 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2086 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2065 | int pipe; | 2087 | int pipe; | |
2066 | 2088 | |||
2067 | if (!dev_priv) | 2089 | if (!dev_priv) | |
2068 | return; | 2090 | return; | |
2069 | 2091 | |||
2070 | for_each_pipe(pipe) | 2092 | for_each_pipe(pipe) | |
2071 | I915_WRITE(PIPESTAT(pipe), 0xffff); | 2093 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2072 | 2094 | |||
2073 | I915_WRITE(HWSTAM, 0xffffffff); | 2095 | I915_WRITE(HWSTAM, 0xffffffff); | |
2074 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 2096 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2075 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 2097 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2076 | for_each_pipe(pipe) | 2098 | for_each_pipe(pipe) | |
2077 | I915_WRITE(PIPESTAT(pipe), 0xffff); | 2099 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2078 | I915_WRITE(VLV_IIR, 0xffffffff); | 2100 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2079 | I915_WRITE(VLV_IMR, 0xffffffff); | 2101 | I915_WRITE(VLV_IMR, 0xffffffff); | |
2080 | I915_WRITE(VLV_IER, 0x0); | 2102 | I915_WRITE(VLV_IER, 0x0); | |
2081 | POSTING_READ(VLV_IER); | 2103 | POSTING_READ(VLV_IER); | |
2082 | } | 2104 | } | |
2083 | 2105 | |||
2084 | static void ironlake_irq_uninstall(struct drm_device *dev) | 2106 | static void ironlake_irq_uninstall(struct drm_device *dev) | |
2085 | { | 2107 | { | |
2086 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2108 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2087 | 2109 | |||
2088 | if (!dev_priv) | 2110 | if (!dev_priv) | |
2089 | return; | 2111 | return; | |
2090 | 2112 | |||
2091 | I915_WRITE(HWSTAM, 0xffffffff); | 2113 | I915_WRITE(HWSTAM, 0xffffffff); | |
2092 | 2114 | |||
2093 | I915_WRITE(DEIMR, 0xffffffff); | 2115 | I915_WRITE(DEIMR, 0xffffffff); | |
2094 | I915_WRITE(DEIER, 0x0); | 2116 | I915_WRITE(DEIER, 0x0); | |
2095 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 2117 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
2096 | 2118 | |||
2097 | I915_WRITE(GTIMR, 0xffffffff); | 2119 | I915_WRITE(GTIMR, 0xffffffff); | |
2098 | I915_WRITE(GTIER, 0x0); | 2120 | I915_WRITE(GTIER, 0x0); | |
2099 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2121 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2100 | 2122 | |||
2101 | I915_WRITE(SDEIMR, 0xffffffff); | 2123 | I915_WRITE(SDEIMR, 0xffffffff); | |
2102 | I915_WRITE(SDEIER, 0x0); | 2124 | I915_WRITE(SDEIER, 0x0); | |
2103 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 2125 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | |
2104 | } | 2126 | } | |
2105 | 2127 | |||
2106 | static void i8xx_irq_preinstall(struct drm_device * dev) | 2128 | static void i8xx_irq_preinstall(struct drm_device * dev) | |
2107 | { | 2129 | { | |
2108 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2130 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2109 | int pipe; | 2131 | int pipe; | |
2110 | 2132 | |||
2111 | atomic_set(&dev_priv->irq_received, 0); | 2133 | atomic_set(&dev_priv->irq_received, 0); | |
2112 | 2134 | |||
2113 | for_each_pipe(pipe) | 2135 | for_each_pipe(pipe) | |
2114 | I915_WRITE(PIPESTAT(pipe), 0); | 2136 | I915_WRITE(PIPESTAT(pipe), 0); | |
2115 | I915_WRITE16(IMR, 0xffff); | 2137 | I915_WRITE16(IMR, 0xffff); | |
2116 | I915_WRITE16(IER, 0x0); | 2138 | I915_WRITE16(IER, 0x0); | |
2117 | POSTING_READ16(IER); | 2139 | POSTING_READ16(IER); | |
2118 | } | 2140 | } | |
2119 | 2141 | |||
2120 | static int i8xx_irq_postinstall(struct drm_device *dev) | 2142 | static int i8xx_irq_postinstall(struct drm_device *dev) | |
2121 | { | 2143 | { | |
2122 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2144 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2123 | 2145 | |||
2124 | dev_priv->pipestat[0] = 0; | 2146 | dev_priv->pipestat[0] = 0; | |
2125 | dev_priv->pipestat[1] = 0; | 2147 | dev_priv->pipestat[1] = 0; | |
2126 | 2148 | |||
2127 | I915_WRITE16(EMR, | 2149 | I915_WRITE16(EMR, | |
2128 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | 2150 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | |
2129 | 2151 | |||
2130 | /* Unmask the interrupts that we always want on. */ | 2152 | /* Unmask the interrupts that we always want on. */ | |
2131 | dev_priv->irq_mask = | 2153 | dev_priv->irq_mask = | |
2132 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 2154 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2133 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 2155 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2134 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 2156 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2135 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | 2157 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2136 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 2158 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2137 | I915_WRITE16(IMR, dev_priv->irq_mask); | 2159 | I915_WRITE16(IMR, dev_priv->irq_mask); | |
2138 | 2160 | |||
2139 | I915_WRITE16(IER, | 2161 | I915_WRITE16(IER, | |
2140 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 2162 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2141 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 2163 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2142 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | 2164 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
2143 | I915_USER_INTERRUPT); | 2165 | I915_USER_INTERRUPT); | |
2144 | POSTING_READ16(IER); | 2166 | POSTING_READ16(IER); | |
2145 | 2167 | |||
2146 | return 0; | 2168 | return 0; | |
2147 | } | 2169 | } | |
2148 | 2170 | |||
2149 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) | 2171 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) | |
2150 | { | 2172 | { | |
2151 | struct drm_device *dev = (struct drm_device *) arg; | 2173 | struct drm_device *dev = (struct drm_device *) arg; | |
2152 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2174 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2153 | u16 iir, new_iir; | 2175 | u16 iir, new_iir; | |
2154 | u32 pipe_stats[2]; | 2176 | u32 pipe_stats[2]; | |
2155 | unsigned long irqflags; | 2177 | unsigned long irqflags; | |
2156 | int irq_received; | 2178 | int irq_received; | |
2157 | int pipe; | 2179 | int pipe; | |
2158 | u16 flip_mask = | 2180 | u16 flip_mask = | |
2159 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 2181 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2160 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 2182 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
2161 | 2183 | |||
2162 | atomic_inc(&dev_priv->irq_received); | 2184 | atomic_inc(&dev_priv->irq_received); | |
2163 | 2185 | |||
2164 | iir = I915_READ16(IIR); | 2186 | iir = I915_READ16(IIR); | |
2165 | if (iir == 0) | 2187 | if (iir == 0) | |
2166 | return IRQ_NONE; | 2188 | return IRQ_NONE; | |
2167 | 2189 | |||
2168 | while (iir & ~flip_mask) { | 2190 | while (iir & ~flip_mask) { | |
2169 | /* Can't rely on pipestat interrupt bit in iir as it might | 2191 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2170 | * have been cleared after the pipestat interrupt was received. | 2192 | * have been cleared after the pipestat interrupt was received. | |
2171 | * It doesn't set the bit in iir again, but it still produces | 2193 | * It doesn't set the bit in iir again, but it still produces | |
2172 | * interrupts (for non-MSI). | 2194 | * interrupts (for non-MSI). | |
2173 | */ | 2195 | */ | |
2174 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 2196 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2175 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 2197 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2176 | i915_handle_error(dev, false); | 2198 | i915_handle_error(dev, false); | |
2177 | 2199 | |||
2178 | for_each_pipe(pipe) { | 2200 | for_each_pipe(pipe) { | |
2179 | int reg = PIPESTAT(pipe); | 2201 | int reg = PIPESTAT(pipe); | |
2180 | pipe_stats[pipe] = I915_READ(reg); | 2202 | pipe_stats[pipe] = I915_READ(reg); | |
2181 | 2203 | |||
2182 | /* | 2204 | /* | |
2183 | * Clear the PIPE*STAT regs before the IIR | 2205 | * Clear the PIPE*STAT regs before the IIR | |
2184 | */ | 2206 | */ | |
2185 | if (pipe_stats[pipe] & 0x8000ffff) { | 2207 | if (pipe_stats[pipe] & 0x8000ffff) { | |
2186 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | 2208 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
2187 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | 2209 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
2188 | pipe_name(pipe)); | 2210 | pipe_name(pipe)); | |
2189 | I915_WRITE(reg, pipe_stats[pipe]); | 2211 | I915_WRITE(reg, pipe_stats[pipe]); | |
2190 | irq_received = 1; | 2212 | irq_received = 1; | |
2191 | } | 2213 | } | |
2192 | } | 2214 | } | |
2193 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 2215 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2194 | 2216 | |||
2195 | I915_WRITE16(IIR, iir & ~flip_mask); | 2217 | I915_WRITE16(IIR, iir & ~flip_mask); | |
2196 | new_iir = I915_READ16(IIR); /* Flush posted writes */ | 2218 | new_iir = I915_READ16(IIR); /* Flush posted writes */ | |
2197 | 2219 | |||
2198 | i915_update_dri1_breadcrumb(dev); | 2220 | i915_update_dri1_breadcrumb(dev); | |
2199 | 2221 | |||
2200 | if (iir & I915_USER_INTERRUPT) | 2222 | if (iir & I915_USER_INTERRUPT) | |
2201 | notify_ring(dev, &dev_priv->ring[RCS]); | 2223 | notify_ring(dev, &dev_priv->ring[RCS]); | |
2202 | 2224 | |||
2203 | if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && | 2225 | if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && | |
2204 | drm_handle_vblank(dev, 0)) { | 2226 | drm_handle_vblank(dev, 0)) { | |
2205 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { | 2227 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { | |
2206 | intel_prepare_page_flip(dev, 0); | 2228 | intel_prepare_page_flip(dev, 0); | |
2207 | intel_finish_page_flip(dev, 0); | 2229 | intel_finish_page_flip(dev, 0); | |
2208 | flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; | 2230 | flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; | |
2209 | } | 2231 | } | |
2210 | } | 2232 | } | |
2211 | 2233 | |||
2212 | if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && | 2234 | if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && | |
2213 | drm_handle_vblank(dev, 1)) { | 2235 | drm_handle_vblank(dev, 1)) { | |
2214 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { | 2236 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { | |
2215 | intel_prepare_page_flip(dev, 1); | 2237 | intel_prepare_page_flip(dev, 1); | |
2216 | intel_finish_page_flip(dev, 1); | 2238 | intel_finish_page_flip(dev, 1); | |
2217 | flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 2239 | flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
2218 | } | 2240 | } | |
2219 | } | 2241 | } | |
2220 | 2242 | |||
2221 | iir = new_iir; | 2243 | iir = new_iir; | |
2222 | } | 2244 | } | |
2223 | 2245 | |||
2224 | return IRQ_HANDLED; | 2246 | return IRQ_HANDLED; | |
2225 | } | 2247 | } | |
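
i8xx_irq_handler is the smallest instance of the loop skeleton shared by all three legacy handlers: snapshot IIR, clear the second-level PIPE*STAT registers under irq_lock, then ack IIR, re-read it to flush the posted write and catch anything that raced in, and loop until nothing unhandled remains. A condensed sketch of that skeleton with the hardware stubbed out; mmio_read/mmio_write are invented names, and the stub mimics write-1-to-clear so the loop terminates.

#include <stdint.h>

enum { IIR, PIPEASTAT, PIPEBSTAT, NREGS };

static uint32_t regs[NREGS];
static uint32_t mmio_read(int r)              { return regs[r]; }
static void     mmio_write(int r, uint32_t v) { regs[r] &= ~v; }  /* W1C */

static void irq_handler_skeleton(void)
{
	uint32_t iir = mmio_read(IIR);

	while (iir != 0) {
		uint32_t new_iir;

		/* Clear second-level status before acking IIR, so a
		 * still-pending status bit cannot immediately re-set
		 * the first-level bit we are about to clear. */
		mmio_write(PIPEASTAT, mmio_read(PIPEASTAT));
		mmio_write(PIPEBSTAT, mmio_read(PIPEBSTAT));

		mmio_write(IIR, iir);        /* ack what we saw */
		new_iir = mmio_read(IIR);    /* flush posted write; pick up
					      * anything that raced in */

		/* ... dispatch ring / vblank / flip work for 'iir' ... */

		iir = new_iir;
	}
}

int main(void)
{
	regs[IIR] = 0x5;            /* pretend two sources are pending */
	irq_handler_skeleton();
	return 0;
}
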
2226 | 2248 | |||
2227 | static void i8xx_irq_uninstall(struct drm_device * dev) | 2249 | static void i8xx_irq_uninstall(struct drm_device * dev) | |
2228 | { | 2250 | { | |
2229 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2251 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2230 | int pipe; | 2252 | int pipe; | |
2231 | 2253 | |||
2232 | for_each_pipe(pipe) { | 2254 | for_each_pipe(pipe) { | |
2233 | /* Clear enable bits; then clear status bits */ | 2255 | /* Clear enable bits; then clear status bits */ | |
2234 | I915_WRITE(PIPESTAT(pipe), 0); | 2256 | I915_WRITE(PIPESTAT(pipe), 0); | |
2235 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | 2257 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | |
2236 | } | 2258 | } | |
2237 | I915_WRITE16(IMR, 0xffff); | 2259 | I915_WRITE16(IMR, 0xffff); | |
2238 | I915_WRITE16(IER, 0x0); | 2260 | I915_WRITE16(IER, 0x0); | |
2239 | I915_WRITE16(IIR, I915_READ16(IIR)); | 2261 | I915_WRITE16(IIR, I915_READ16(IIR)); | |
2240 | } | 2262 | } | |
2241 | 2263 | |||
2242 | static void i915_irq_preinstall(struct drm_device * dev) | 2264 | static void i915_irq_preinstall(struct drm_device * dev) | |
2243 | { | 2265 | { | |
2244 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2266 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2245 | int pipe; | 2267 | int pipe; | |
2246 | 2268 | |||
2247 | atomic_set(&dev_priv->irq_received, 0); | 2269 | atomic_set(&dev_priv->irq_received, 0); | |
2248 | 2270 | |||
2249 | if (I915_HAS_HOTPLUG(dev)) { | 2271 | if (I915_HAS_HOTPLUG(dev)) { | |
2250 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 2272 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2251 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 2273 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2252 | } | 2274 | } | |
2253 | 2275 | |||
2254 | I915_WRITE16(HWSTAM, 0xeffe); | 2276 | I915_WRITE16(HWSTAM, 0xeffe); | |
2255 | for_each_pipe(pipe) | 2277 | for_each_pipe(pipe) | |
2256 | I915_WRITE(PIPESTAT(pipe), 0); | 2278 | I915_WRITE(PIPESTAT(pipe), 0); | |
2257 | I915_WRITE(IMR, 0xffffffff); | 2279 | I915_WRITE(IMR, 0xffffffff); | |
2258 | I915_WRITE(IER, 0x0); | 2280 | I915_WRITE(IER, 0x0); | |
2259 | POSTING_READ(IER); | 2281 | POSTING_READ(IER); | |
2260 | } | 2282 | } | |
2261 | 2283 | |||
2262 | static int i915_irq_postinstall(struct drm_device *dev) | 2284 | static int i915_irq_postinstall(struct drm_device *dev) | |
2263 | { | 2285 | { | |
2264 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2286 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2265 | u32 enable_mask; | 2287 | u32 enable_mask; | |
2266 | 2288 | |||
2267 | dev_priv->pipestat[0] = 0; | 2289 | dev_priv->pipestat[0] = 0; | |
2268 | dev_priv->pipestat[1] = 0; | 2290 | dev_priv->pipestat[1] = 0; | |
2269 | 2291 | |||
2270 | I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | 2292 | I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | |
2271 | 2293 | |||
2272 | /* Unmask the interrupts that we always want on. */ | 2294 | /* Unmask the interrupts that we always want on. */ | |
2273 | dev_priv->irq_mask = | 2295 | dev_priv->irq_mask = | |
2274 | ~(I915_ASLE_INTERRUPT | | 2296 | ~(I915_ASLE_INTERRUPT | | |
2275 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 2297 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2276 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 2298 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2277 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 2299 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2278 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | 2300 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2279 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 2301 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2280 | 2302 | |||
2281 | enable_mask = | 2303 | enable_mask = | |
2282 | I915_ASLE_INTERRUPT | | 2304 | I915_ASLE_INTERRUPT | | |
2283 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 2305 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2284 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 2306 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2285 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | 2307 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
2286 | I915_USER_INTERRUPT; | 2308 | I915_USER_INTERRUPT; | |
2287 | 2309 | |||
2288 | if (I915_HAS_HOTPLUG(dev)) { | 2310 | if (I915_HAS_HOTPLUG(dev)) { | |
2289 | /* Enable in IER... */ | 2311 | /* Enable in IER... */ | |
2290 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 2312 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | |
2291 | /* and unmask in IMR */ | 2313 | /* and unmask in IMR */ | |
2292 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; | 2314 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; | |
2293 | } | 2315 | } | |
2294 | 2316 | |||
2295 | I915_WRITE(IMR, dev_priv->irq_mask); | 2317 | I915_WRITE(IMR, dev_priv->irq_mask); | |
2296 | I915_WRITE(IER, enable_mask); | 2318 | I915_WRITE(IER, enable_mask); | |
2297 | POSTING_READ(IER); | 2319 | POSTING_READ(IER); | |
2298 | 2320 | |||
2299 | if (I915_HAS_HOTPLUG(dev)) { | 2321 | if (I915_HAS_HOTPLUG(dev)) { | |
2300 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 2322 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | |
2301 | 2323 | |||
2302 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | 2324 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | |
2303 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | 2325 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | |
2304 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | 2326 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | |
2305 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | 2327 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | |
2306 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | 2328 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | |
2307 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | 2329 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | |
2308 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) | 2330 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) | |
2309 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | 2331 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | |
2310 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) | 2332 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) | |
2311 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | 2333 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | |
2312 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | 2334 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | |
2313 | hotplug_en |= CRT_HOTPLUG_INT_EN; | 2335 | hotplug_en |= CRT_HOTPLUG_INT_EN; | |
2314 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | 2336 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | |
2315 | } | 2337 | } | |
2316 | 2338 | |||
2317 | /* Ignore TV since it's buggy */ | 2339 | /* Ignore TV since it's buggy */ | |
2318 | 2340 | |||
2319 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 2341 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | |
2320 | } | 2342 | } | |
2321 | 2343 | |||
2322 | intel_opregion_enable_asle(dev); | 2344 | intel_opregion_enable_asle(dev); | |
2323 | 2345 | |||
2324 | return 0; | 2346 | return 0; | |
2325 | } | 2347 | } | |
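
The same if-chain translating hotplug_supported_mask status bits into PORT_HOTPLUG_EN enable bits appears here, in the valleyview path above, and again in the i965 path below. A table-driven form would state the mapping once; this is only a sketch of that alternative, and the bit values are placeholders for the real constants in i915_reg.h.

#include <stdint.h>
#include <stddef.h>

struct hotplug_map {
	uint32_t status;   /* bit tested in hotplug_supported_mask */
	uint32_t enable;   /* bit ORed into PORT_HOTPLUG_EN */
};

static const struct hotplug_map hotplug_map[] = {
	{ 1u << 29, 1u << 29 },   /* HDMIB (placeholder values) */
	{ 1u << 28, 1u << 28 },   /* HDMIC */
	{ 1u << 27, 1u << 27 },   /* HDMID */
	{ 1u << 26, 1u << 26 },   /* SDVOC */
	{ 1u << 25, 1u << 25 },   /* SDVOB */
};

static uint32_t hotplug_enable_bits(uint32_t supported)
{
	uint32_t en = 0;
	size_t i;

	for (i = 0; i < sizeof(hotplug_map) / sizeof(hotplug_map[0]); i++)
		if (supported & hotplug_map[i].status)
			en |= hotplug_map[i].enable;
	return en;
}

CRT would stay special-cased either way, since it also needs the voltage-compare (and, on G4X, the activation-period) bits.
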
2326 | 2348 | |||
2327 | static irqreturn_t i915_irq_handler(int irq, void *arg) | 2349 | static irqreturn_t i915_irq_handler(int irq, void *arg) | |
2328 | { | 2350 | { | |
2329 | struct drm_device *dev = (struct drm_device *) arg; | 2351 | struct drm_device *dev = (struct drm_device *) arg; | |
2330 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2352 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2331 | u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; | 2353 | u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; | |
2332 | unsigned long irqflags; | 2354 | unsigned long irqflags; | |
2333 | u32 flip_mask = | 2355 | u32 flip_mask = | |
2334 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 2356 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2335 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 2357 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
2336 | u32 flip[2] = { | 2358 | u32 flip[2] = { | |
2337 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, | 2359 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, | |
2338 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2360 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
2339 | }; | 2361 | }; | |
2340 | int pipe, ret = IRQ_NONE; | 2362 | int pipe, ret = IRQ_NONE; | |
2341 | 2363 | |||
2342 | atomic_inc(&dev_priv->irq_received); | 2364 | atomic_inc(&dev_priv->irq_received); | |
2343 | 2365 | |||
2344 | iir = I915_READ(IIR); | 2366 | iir = I915_READ(IIR); | |
2345 | do { | 2367 | do { | |
2346 | bool irq_received = (iir & ~flip_mask) != 0; | 2368 | bool irq_received = (iir & ~flip_mask) != 0; | |
2347 | bool blc_event = false; | 2369 | bool blc_event = false; | |
2348 | 2370 | |||
2349 | /* Can't rely on pipestat interrupt bit in iir as it might | 2371 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2350 | * have been cleared after the pipestat interrupt was received. | 2372 | * have been cleared after the pipestat interrupt was received. | |
2351 | * It doesn't set the bit in iir again, but it still produces | 2373 | * It doesn't set the bit in iir again, but it still produces | |
2352 | * interrupts (for non-MSI). | 2374 | * interrupts (for non-MSI). | |
2353 | */ | 2375 | */ | |
2354 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 2376 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2355 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 2377 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2356 | i915_handle_error(dev, false); | 2378 | i915_handle_error(dev, false); | |
2357 | 2379 | |||
2358 | for_each_pipe(pipe) { | 2380 | for_each_pipe(pipe) { | |
2359 | int reg = PIPESTAT(pipe); | 2381 | int reg = PIPESTAT(pipe); | |
2360 | pipe_stats[pipe] = I915_READ(reg); | 2382 | pipe_stats[pipe] = I915_READ(reg); | |
2361 | 2383 | |||
2362 | /* Clear the PIPE*STAT regs before the IIR */ | 2384 | /* Clear the PIPE*STAT regs before the IIR */ | |
2363 | if (pipe_stats[pipe] & 0x8000ffff) { | 2385 | if (pipe_stats[pipe] & 0x8000ffff) { | |
2364 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | 2386 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
2365 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | 2387 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
2366 | pipe_name(pipe)); | 2388 | pipe_name(pipe)); | |
2367 | I915_WRITE(reg, pipe_stats[pipe]); | 2389 | I915_WRITE(reg, pipe_stats[pipe]); | |
2368 | irq_received = true; | 2390 | irq_received = true; | |
2369 | } | 2391 | } | |
2370 | } | 2392 | } | |
2371 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 2393 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2372 | 2394 | |||
2373 | if (!irq_received) | 2395 | if (!irq_received) | |
2374 | break; | 2396 | break; | |
2375 | 2397 | |||
2376 | /* Consume port. Then clear IIR or we'll miss events */ | 2398 | /* Consume port. Then clear IIR or we'll miss events */ | |
2377 | if ((I915_HAS_HOTPLUG(dev)) && | 2399 | if ((I915_HAS_HOTPLUG(dev)) && | |
2378 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { | 2400 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { | |
2379 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 2401 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
2380 | 2402 | |||
2381 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | 2403 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
2382 | hotplug_status); | 2404 | hotplug_status); | |
2383 | if (hotplug_status & dev_priv->hotplug_supported_mask) | 2405 | if (hotplug_status & dev_priv->hotplug_supported_mask) | |
2384 | queue_work(dev_priv->wq, | 2406 | queue_work(dev_priv->wq, | |
2385 | &dev_priv->hotplug_work); | 2407 | &dev_priv->hotplug_work); | |
2386 | 2408 | |||
2387 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 2409 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | |
2388 | POSTING_READ(PORT_HOTPLUG_STAT); | 2410 | POSTING_READ(PORT_HOTPLUG_STAT); | |
2389 | } | 2411 | } | |
2390 | 2412 | |||
2391 | I915_WRITE(IIR, iir & ~flip_mask); | 2413 | I915_WRITE(IIR, iir & ~flip_mask); | |
2392 | new_iir = I915_READ(IIR); /* Flush posted writes */ | 2414 | new_iir = I915_READ(IIR); /* Flush posted writes */ | |
2393 | 2415 | |||
2394 | if (iir & I915_USER_INTERRUPT) | 2416 | if (iir & I915_USER_INTERRUPT) | |
2395 | notify_ring(dev, &dev_priv->ring[RCS]); | 2417 | notify_ring(dev, &dev_priv->ring[RCS]); | |
2396 | 2418 | |||
2397 | for_each_pipe(pipe) { | 2419 | for_each_pipe(pipe) { | |
2398 | int plane = pipe; | 2420 | int plane = pipe; | |
2399 | if (IS_MOBILE(dev)) | 2421 | if (IS_MOBILE(dev)) | |
2400 | plane = !plane; | 2422 | plane = !plane; | |
2401 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && | 2423 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && | |
2402 | drm_handle_vblank(dev, pipe)) { | 2424 | drm_handle_vblank(dev, pipe)) { | |
2403 | if (iir & flip[plane]) { | 2425 | if (iir & flip[plane]) { | |
2404 | intel_prepare_page_flip(dev, plane); | 2426 | intel_prepare_page_flip(dev, plane); | |
2405 | intel_finish_page_flip(dev, pipe); | 2427 | intel_finish_page_flip(dev, pipe); | |
2406 | flip_mask &= ~flip[plane]; | 2428 | flip_mask &= ~flip[plane]; | |
2407 | } | 2429 | } | |
2408 | } | 2430 | } | |
2409 | 2431 | |||
2410 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | 2432 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
2411 | blc_event = true; | 2433 | blc_event = true; | |
2412 | } | 2434 | } | |
2413 | 2435 | |||
2414 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | 2436 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | |
2415 | intel_opregion_asle_intr(dev); | 2437 | intel_opregion_asle_intr(dev); | |
2416 | 2438 | |||
2417 | /* With MSI, interrupts are only generated when iir | 2439 | /* With MSI, interrupts are only generated when iir | |
2418 | * transitions from zero to nonzero. If another bit got | 2440 | * transitions from zero to nonzero. If another bit got | |
2419 | * set while we were handling the existing iir bits, then | 2441 | * set while we were handling the existing iir bits, then | |
2420 | * we would never get another interrupt. | 2442 | * we would never get another interrupt. | |
2421 | * | 2443 | * | |
2422 | * This is fine on non-MSI as well, as if we hit this path | 2444 | * This is fine on non-MSI as well, as if we hit this path | |
2423 | * we avoid exiting the interrupt handler only to generate | 2445 | * we avoid exiting the interrupt handler only to generate | |
2424 | * another one. | 2446 | * another one. | |
2425 | * | 2447 | * | |
2426 | * Note that for MSI this could cause a stray interrupt report | 2448 | * Note that for MSI this could cause a stray interrupt report | |
2427 | * if an interrupt landed in the time between writing IIR and | 2449 | * if an interrupt landed in the time between writing IIR and | |
2428 | * the posting read. This should be rare enough to never | 2450 | * the posting read. This should be rare enough to never | |
2429 | * trigger the 99% of 100,000 interrupts test for disabling | 2451 | * trigger the 99% of 100,000 interrupts test for disabling | |
2430 | * stray interrupts. | 2452 | * stray interrupts. | |
2431 | */ | 2453 | */ | |
2432 | ret = IRQ_HANDLED; | 2454 | ret = IRQ_HANDLED; | |
2433 | iir = new_iir; | 2455 | iir = new_iir; | |
2434 | } while (iir & ~flip_mask); | 2456 | } while (iir & ~flip_mask); | |
2435 | 2457 | |||
2436 | i915_update_dri1_breadcrumb(dev); | 2458 | i915_update_dri1_breadcrumb(dev); | |
2437 | 2459 | |||
2438 | return ret; | 2460 | return ret; | |
2439 | } | 2461 | } | |
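
The block comment about MSI firing only on a zero-to-nonzero IIR transition is the whole reason the handler keeps looping on new_iir instead of returning after one pass. Here is a toy model of the hazard it describes, under the simplified assumption that exactly one message is generated per such transition; all names are invented.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static uint32_t iir;       /* latched interrupt bits */
static bool msi_fired;     /* set only on a 0 -> nonzero transition */

static void raise_bit(uint32_t bit)
{
	if (iir == 0)
		msi_fired = true;
	iir |= bit;
}

int main(void)
{
	raise_bit(1u << 0);      /* edge: MSI fires, handler is entered */
	msi_fired = false;

	uint32_t seen = iir;     /* handler snapshots IIR */
	raise_bit(1u << 4);      /* races in: IIR already nonzero, no edge */
	iir &= ~seen;            /* handler acks only what it saw */

	/* IIR still holds bit 4, yet msi_fired stayed false: without the
	 * re-read-and-loop in the real handler this event is stranded. */
	printf("IIR=0x%x, msi_fired=%d\n", iir, msi_fired);
	return 0;
}
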
2440 | 2462 | |||
2441 | static void i915_irq_uninstall(struct drm_device * dev) | 2463 | static void i915_irq_uninstall(struct drm_device * dev) | |
2442 | { | 2464 | { | |
2443 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2465 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2444 | int pipe; | 2466 | int pipe; | |
2445 | 2467 | |||
2446 | if (I915_HAS_HOTPLUG(dev)) { | 2468 | if (I915_HAS_HOTPLUG(dev)) { | |
2447 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 2469 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2448 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 2470 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2449 | } | 2471 | } | |
2450 | 2472 | |||
2451 | I915_WRITE16(HWSTAM, 0xffff); | 2473 | I915_WRITE16(HWSTAM, 0xffff); | |
2452 | for_each_pipe(pipe) { | 2474 | for_each_pipe(pipe) { | |
2453 | /* Clear enable bits; then clear status bits */ | 2475 | /* Clear enable bits; then clear status bits */ | |
2454 | I915_WRITE(PIPESTAT(pipe), 0); | 2476 | I915_WRITE(PIPESTAT(pipe), 0); | |
2455 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | 2477 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | |
2456 | } | 2478 | } | |
2457 | I915_WRITE(IMR, 0xffffffff); | 2479 | I915_WRITE(IMR, 0xffffffff); | |
2458 | I915_WRITE(IER, 0x0); | 2480 | I915_WRITE(IER, 0x0); | |
2459 | 2481 | |||
2460 | I915_WRITE(IIR, I915_READ(IIR)); | 2482 | I915_WRITE(IIR, I915_READ(IIR)); | |
2461 | } | 2483 | } | |
2462 | 2484 | |||
2463 | static void i965_irq_preinstall(struct drm_device * dev) | 2485 | static void i965_irq_preinstall(struct drm_device * dev) | |
2464 | { | 2486 | { | |
2465 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2487 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2466 | int pipe; | 2488 | int pipe; | |
2467 | 2489 | |||
2468 | atomic_set(&dev_priv->irq_received, 0); | 2490 | atomic_set(&dev_priv->irq_received, 0); | |
2469 | 2491 | |||
2470 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 2492 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2471 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 2493 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2472 | 2494 | |||
2473 | I915_WRITE(HWSTAM, 0xeffe); | 2495 | I915_WRITE(HWSTAM, 0xeffe); | |
2474 | for_each_pipe(pipe) | 2496 | for_each_pipe(pipe) | |
2475 | I915_WRITE(PIPESTAT(pipe), 0); | 2497 | I915_WRITE(PIPESTAT(pipe), 0); | |
2476 | I915_WRITE(IMR, 0xffffffff); | 2498 | I915_WRITE(IMR, 0xffffffff); | |
2477 | I915_WRITE(IER, 0x0); | 2499 | I915_WRITE(IER, 0x0); | |
2478 | POSTING_READ(IER); | 2500 | POSTING_READ(IER); | |
2479 | } | 2501 | } | |
2480 | 2502 | |||
2481 | static int i965_irq_postinstall(struct drm_device *dev) | 2503 | static int i965_irq_postinstall(struct drm_device *dev) | |
2482 | { | 2504 | { | |
2483 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2505 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2484 | u32 hotplug_en; | 2506 | u32 hotplug_en; | |
2485 | u32 enable_mask; | 2507 | u32 enable_mask; | |
2486 | u32 error_mask; | 2508 | u32 error_mask; | |
2487 | 2509 | |||
2488 | /* Unmask the interrupts that we always want on. */ | 2510 | /* Unmask the interrupts that we always want on. */ | |
2489 | dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | | 2511 | dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | | |
2490 | I915_DISPLAY_PORT_INTERRUPT | | 2512 | I915_DISPLAY_PORT_INTERRUPT | | |
2491 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 2513 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2492 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | 2514 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2493 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | 2515 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2494 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | 2516 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2495 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | 2517 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2496 | 2518 | |||
2497 | enable_mask = ~dev_priv->irq_mask; | 2519 | enable_mask = ~dev_priv->irq_mask; | |
2498 | enable_mask |= I915_USER_INTERRUPT; | 2520 | enable_mask |= I915_USER_INTERRUPT; | |
2499 | 2521 | |||
2500 | if (IS_G4X(dev)) | 2522 | if (IS_G4X(dev)) | |
2501 | enable_mask |= I915_BSD_USER_INTERRUPT; | 2523 | enable_mask |= I915_BSD_USER_INTERRUPT; | |
2502 | 2524 | |||
2503 | dev_priv->pipestat[0] = 0; | 2525 | dev_priv->pipestat[0] = 0; | |
2504 | dev_priv->pipestat[1] = 0; | 2526 | dev_priv->pipestat[1] = 0; | |
2505 | 2527 | |||
2506 | /* | 2528 | /* | |
2507 | * Enable some error detection, note the instruction error mask | 2529 | * Enable some error detection, note the instruction error mask | |
2508 | * bit is reserved, so we leave it masked. | 2530 | * bit is reserved, so we leave it masked. | |
2509 | */ | 2531 | */ | |
2510 | if (IS_G4X(dev)) { | 2532 | if (IS_G4X(dev)) { | |
2511 | error_mask = ~(GM45_ERROR_PAGE_TABLE | | 2533 | error_mask = ~(GM45_ERROR_PAGE_TABLE | | |
2512 | GM45_ERROR_MEM_PRIV | | 2534 | GM45_ERROR_MEM_PRIV | | |
2513 | GM45_ERROR_CP_PRIV | | 2535 | GM45_ERROR_CP_PRIV | | |
2514 | I915_ERROR_MEMORY_REFRESH); | 2536 | I915_ERROR_MEMORY_REFRESH); | |
2515 | } else { | 2537 | } else { | |
2516 | error_mask = ~(I915_ERROR_PAGE_TABLE | | 2538 | error_mask = ~(I915_ERROR_PAGE_TABLE | | |
2517 | I915_ERROR_MEMORY_REFRESH); | 2539 | I915_ERROR_MEMORY_REFRESH); | |
2518 | } | 2540 | } | |
2519 | I915_WRITE(EMR, error_mask); | 2541 | I915_WRITE(EMR, error_mask); | |
2520 | 2542 | |||
2521 | I915_WRITE(IMR, dev_priv->irq_mask); | 2543 | I915_WRITE(IMR, dev_priv->irq_mask); | |
2522 | I915_WRITE(IER, enable_mask); | 2544 | I915_WRITE(IER, enable_mask); | |
2523 | POSTING_READ(IER); | 2545 | POSTING_READ(IER); | |
2524 | 2546 | |||
2525 | /* Note HDMI and DP share hotplug bits */ | 2547 | /* Note HDMI and DP share hotplug bits */ | |
2526 | hotplug_en = 0; | 2548 | hotplug_en = 0; | |
2527 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | 2549 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | |
2528 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | 2550 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | |
2529 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | 2551 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | |
2530 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | 2552 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | |
2531 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | 2553 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | |
2532 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | 2554 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | |
2533 | if (IS_G4X(dev)) { | 2555 | if (IS_G4X(dev)) { | |
2534 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) | 2556 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) | |
2535 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | 2557 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | |
2536 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) | 2558 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) | |
2537 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | 2559 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | |
2538 | } else { | 2560 | } else { | |
2539 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) | 2561 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) | |
2540 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | 2562 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | |
2541 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) | 2563 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) | |
2542 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | 2564 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | |
2543 | } | 2565 | } | |
2544 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | 2566 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { | |
2545 | hotplug_en |= CRT_HOTPLUG_INT_EN; | 2567 | hotplug_en |= CRT_HOTPLUG_INT_EN; | |
2546 | 2568 | |||
2547 | /* Programming the CRT detection parameters tends | 2569 | /* Programming the CRT detection parameters tends | |
2548 | to generate a spurious hotplug event about three | 2570 | to generate a spurious hotplug event about three | |
2549 | seconds later. So just do it once. | 2571 | seconds later. So just do it once. | |
2550 | */ | 2572 | */ | |
2551 | if (IS_G4X(dev)) | 2573 | if (IS_G4X(dev)) | |
2552 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | 2574 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | |
2553 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | 2575 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | |
2554 | } | 2576 | } | |
2555 | 2577 | |||
2556 | /* Ignore TV since it's buggy */ | 2578 | /* Ignore TV since it's buggy */ | |
2557 | 2579 | |||
2558 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 2580 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | |
2559 | 2581 | |||
2560 | intel_opregion_enable_asle(dev); | 2582 | intel_opregion_enable_asle(dev); | |
2561 | 2583 | |||
2562 | return 0; | 2584 | return 0; | |
2563 | } | 2585 | } | |
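
i965_irq_postinstall leans on the inverted-mask convention twice: irq_mask is the complement of the always-on set, and EMR gets the complement of the error bits worth reporting. A one-line helper makes the idiom explicit; this is a readability sketch, not part of the driver's API.

#include <stdint.h>

/* A set bit in IMR/EMR disables that source, so "enable exactly
 * these" is the complement of the wanted set. */
static inline uint32_t mask_all_except(uint32_t wanted)
{
	return ~wanted;
}

/* e.g. EMR = mask_all_except(I915_ERROR_PAGE_TABLE |
 *                            I915_ERROR_MEMORY_REFRESH); */
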
2564 | 2586 | |||
2565 | static irqreturn_t i965_irq_handler(int irq, void *arg) | 2587 | static irqreturn_t i965_irq_handler(int irq, void *arg) | |
2566 | { | 2588 | { | |
2567 | struct drm_device *dev = (struct drm_device *) arg; | 2589 | struct drm_device *dev = (struct drm_device *) arg; | |
2568 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2590 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2569 | u32 iir, new_iir; | 2591 | u32 iir, new_iir; | |
2570 | u32 pipe_stats[I915_MAX_PIPES]; | 2592 | u32 pipe_stats[I915_MAX_PIPES]; | |
2571 | unsigned long irqflags; | 2593 | unsigned long irqflags; | |
2572 | int irq_received; | 2594 | int irq_received; | |
2573 | int ret = IRQ_NONE, pipe; | 2595 | int ret = IRQ_NONE, pipe; | |
2574 | 2596 | |||
2575 | atomic_inc(&dev_priv->irq_received); | 2597 | atomic_inc(&dev_priv->irq_received); | |
2576 | 2598 | |||
2577 | iir = I915_READ(IIR); | 2599 | iir = I915_READ(IIR); | |
2578 | 2600 | |||
2579 | for (;;) { | 2601 | for (;;) { | |
2580 | bool blc_event = false; | 2602 | bool blc_event = false; | |
2581 | 2603 | |||
2582 | irq_received = iir != 0; | 2604 | irq_received = iir != 0; | |
2583 | 2605 | |||
2584 | /* Can't rely on pipestat interrupt bit in iir as it might | 2606 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2585 | * have been cleared after the pipestat interrupt was received. | 2607 | * have been cleared after the pipestat interrupt was received. | |
2586 | * It doesn't set the bit in iir again, but it still produces | 2608 | * It doesn't set the bit in iir again, but it still produces | |
2587 | * interrupts (for non-MSI). | 2609 | * interrupts (for non-MSI). | |
2588 | */ | 2610 | */ | |
2589 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 2611 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2590 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | 2612 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2591 | i915_handle_error(dev, false); | 2613 | i915_handle_error(dev, false); | |
2592 | 2614 | |||
2593 | for_each_pipe(pipe) { | 2615 | for_each_pipe(pipe) { | |
2594 | int reg = PIPESTAT(pipe); | 2616 | int reg = PIPESTAT(pipe); | |
2595 | pipe_stats[pipe] = I915_READ(reg); | 2617 | pipe_stats[pipe] = I915_READ(reg); | |
2596 | 2618 | |||
2597 | /* | 2619 | /* | |
2598 | * Clear the PIPE*STAT regs before the IIR | 2620 | * Clear the PIPE*STAT regs before the IIR | |
2599 | */ | 2621 | */ | |
2600 | if (pipe_stats[pipe] & 0x8000ffff) { | 2622 | if (pipe_stats[pipe] & 0x8000ffff) { | |
2601 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | 2623 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
2602 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | 2624 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
2603 | pipe_name(pipe)); | 2625 | pipe_name(pipe)); | |
2604 | I915_WRITE(reg, pipe_stats[pipe]); | 2626 | I915_WRITE(reg, pipe_stats[pipe]); | |
2605 | irq_received = 1; | 2627 | irq_received = 1; | |
2606 | } | 2628 | } | |
2607 | } | 2629 | } | |
2608 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 2630 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2609 | 2631 | |||
2610 | if (!irq_received) | 2632 | if (!irq_received) | |
2611 | break; | 2633 | break; | |
2612 | 2634 | |||
2613 | ret = IRQ_HANDLED; | 2635 | ret = IRQ_HANDLED; | |
2614 | 2636 | |||
2615 | /* Consume port. Then clear IIR or we'll miss events */ | 2637 | /* Consume port. Then clear IIR or we'll miss events */ | |
2616 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { | 2638 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { | |
2617 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 2639 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
2618 | 2640 | |||
2619 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | 2641 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
2620 | hotplug_status); | 2642 | hotplug_status); | |
2621 | if (hotplug_status & dev_priv->hotplug_supported_mask) | 2643 | if (hotplug_status & dev_priv->hotplug_supported_mask) | |
2622 | queue_work(dev_priv->wq, | 2644 | queue_work(dev_priv->wq, | |
2623 | &dev_priv->hotplug_work); | 2645 | &dev_priv->hotplug_work); | |
2624 | 2646 | |||
2625 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 2647 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | |
2626 | I915_READ(PORT_HOTPLUG_STAT); | 2648 | I915_READ(PORT_HOTPLUG_STAT); | |
2627 | } | 2649 | } | |
2628 | 2650 | |||
2629 | I915_WRITE(IIR, iir); | 2651 | I915_WRITE(IIR, iir); | |
2630 | new_iir = I915_READ(IIR); /* Flush posted writes */ | 2652 | new_iir = I915_READ(IIR); /* Flush posted writes */ | |
2631 | 2653 | |||
2632 | if (iir & I915_USER_INTERRUPT) | 2654 | if (iir & I915_USER_INTERRUPT) | |
2633 | notify_ring(dev, &dev_priv->ring[RCS]); | 2655 | notify_ring(dev, &dev_priv->ring[RCS]); | |
2634 | if (iir & I915_BSD_USER_INTERRUPT) | 2656 | if (iir & I915_BSD_USER_INTERRUPT) | |
2635 | notify_ring(dev, &dev_priv->ring[VCS]); | 2657 | notify_ring(dev, &dev_priv->ring[VCS]); | |
2636 | 2658 | |||
2637 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) | 2659 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) | |
2638 | intel_prepare_page_flip(dev, 0); | 2660 | intel_prepare_page_flip(dev, 0); | |
2639 | 2661 | |||
2640 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) | 2662 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) | |
2641 | intel_prepare_page_flip(dev, 1); | 2663 | intel_prepare_page_flip(dev, 1); | |
2642 | 2664 | |||
2643 | for_each_pipe(pipe) { | 2665 | for_each_pipe(pipe) { | |
2644 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && | 2666 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && | |
2645 | drm_handle_vblank(dev, pipe)) { | 2667 | drm_handle_vblank(dev, pipe)) { | |
2646 | i915_pageflip_stall_check(dev, pipe); | 2668 | i915_pageflip_stall_check(dev, pipe); | |
2647 | intel_finish_page_flip(dev, pipe); | 2669 | intel_finish_page_flip(dev, pipe); | |
2648 | } | 2670 | } | |
2649 | 2671 | |||
2650 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | 2672 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
2651 | blc_event = true; | 2673 | blc_event = true; | |
2652 | } | 2674 | } | |
2653 | 2675 | |||
2654 | 2676 | |||
2655 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | 2677 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | |
2656 | intel_opregion_asle_intr(dev); | 2678 | intel_opregion_asle_intr(dev); | |
2657 | 2679 | |||
2658 | /* With MSI, interrupts are only generated when iir | 2680 | /* With MSI, interrupts are only generated when iir | |
2659 | * transitions from zero to nonzero. If another bit got | 2681 | * transitions from zero to nonzero. If another bit got | |
2660 | * set while we were handling the existing iir bits, then | 2682 | * set while we were handling the existing iir bits, then | |
2661 | * we would never get another interrupt. | 2683 | * we would never get another interrupt. | |
2662 | * | 2684 | * | |
2663 | * This is fine on non-MSI as well, as if we hit this path | 2685 | * This is fine on non-MSI as well, as if we hit this path | |
2664 | * we avoid exiting the interrupt handler only to generate | 2686 | * we avoid exiting the interrupt handler only to generate | |
2665 | * another one. | 2687 | * another one. | |
2666 | * | 2688 | * | |
2667 | * Note that for MSI this could cause a stray interrupt report | 2689 | * Note that for MSI this could cause a stray interrupt report | |
2668 | * if an interrupt landed in the time between writing IIR and | 2690 | * if an interrupt landed in the time between writing IIR and | |
2669 | * the posting read. This should be rare enough to never | 2691 | * the posting read. This should be rare enough to never | |
2670 | * trigger the 99% of 100,000 interrupts test for disabling | 2692 | * trigger the 99% of 100,000 interrupts test for disabling | |
2671 | * stray interrupts. | 2693 | * stray interrupts. | |
2672 | */ | 2694 | */ | |
2673 | iir = new_iir; | 2695 | iir = new_iir; | |
2674 | } | 2696 | } | |
2675 | 2697 | |||
2676 | i915_update_dri1_breadcrumb(dev); | 2698 | i915_update_dri1_breadcrumb(dev); | |
2677 | 2699 | |||
2678 | return ret; | 2700 | return ret; |
--- src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.c 2013/07/24 03:05:41 1.1.1.1.2.4
+++ src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.c 2013/07/24 03:06:00 1.1.1.1.2.5
@@ -155,1743 +155,1747 @@ gen4_render_ring_flush(struct intel_ring | @@ -155,1743 +155,1747 @@ gen4_render_ring_flush(struct intel_ring | |||
155 | * flushes. | 155 | * flushes. | |
156 | * | 156 | * | |
157 | * And this last workaround is tricky because of the requirements on | 157 | * And this last workaround is tricky because of the requirements on | |
158 | * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM | 158 | * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM | |
159 | * volume 2 part 1: | 159 | * volume 2 part 1: | |
160 | * | 160 | * | |
161 | * "1 of the following must also be set: | 161 | * "1 of the following must also be set: | |
162 | * - Render Target Cache Flush Enable ([12] of DW1) | 162 | * - Render Target Cache Flush Enable ([12] of DW1) | |
163 | * - Depth Cache Flush Enable ([0] of DW1) | 163 | * - Depth Cache Flush Enable ([0] of DW1) | |
164 | * - Stall at Pixel Scoreboard ([1] of DW1) | 164 | * - Stall at Pixel Scoreboard ([1] of DW1) | |
165 | * - Depth Stall ([13] of DW1) | 165 | * - Depth Stall ([13] of DW1) | |
166 | * - Post-Sync Operation ([13] of DW1) | 166 | * - Post-Sync Operation ([13] of DW1) | |
167 | * - Notify Enable ([8] of DW1)" | 167 | * - Notify Enable ([8] of DW1)" | |
168 | * | 168 | * | |
169 | * The cache flushes require the workaround flush that triggered this | 169 | * The cache flushes require the workaround flush that triggered this | |
170 | * one, so we can't use it. Depth stall would trigger the same. | 170 | * one, so we can't use it. Depth stall would trigger the same. | |
171 | * Post-sync nonzero is what triggered this second workaround, so we | 171 | * Post-sync nonzero is what triggered this second workaround, so we | |
172 | * can't use that one either. Notify enable is IRQs, which aren't | 172 | * can't use that one either. Notify enable is IRQs, which aren't | |
173 | * really our business. That leaves only stall at scoreboard. | 173 | * really our business. That leaves only stall at scoreboard. | |
174 | */ | 174 | */ | |
175 | static int | 175 | static int | |
176 | intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) | 176 | intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) | |
177 | { | 177 | { | |
178 | struct pipe_control *pc = ring->private; | 178 | struct pipe_control *pc = ring->private; | |
179 | u32 scratch_addr = pc->gtt_offset + 128; | 179 | u32 scratch_addr = pc->gtt_offset + 128; | |
180 | int ret; | 180 | int ret; | |
181 | 181 | |||
182 | 182 | |||
183 | ret = intel_ring_begin(ring, 6); | 183 | ret = intel_ring_begin(ring, 6); | |
184 | if (ret) | 184 | if (ret) | |
185 | return ret; | 185 | return ret; | |
186 | 186 | |||
187 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | 187 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | |
188 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | | 188 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | | |
189 | PIPE_CONTROL_STALL_AT_SCOREBOARD); | 189 | PIPE_CONTROL_STALL_AT_SCOREBOARD); | |
190 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ | 190 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ | |
191 | intel_ring_emit(ring, 0); /* low dword */ | 191 | intel_ring_emit(ring, 0); /* low dword */ | |
192 | intel_ring_emit(ring, 0); /* high dword */ | 192 | intel_ring_emit(ring, 0); /* high dword */ | |
193 | intel_ring_emit(ring, MI_NOOP); | 193 | intel_ring_emit(ring, MI_NOOP); | |
194 | intel_ring_advance(ring); | 194 | intel_ring_advance(ring); | |
195 | 195 | |||
196 | ret = intel_ring_begin(ring, 6); | 196 | ret = intel_ring_begin(ring, 6); | |
197 | if (ret) | 197 | if (ret) | |
198 | return ret; | 198 | return ret; | |
199 | 199 | |||
200 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | 200 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | |
201 | intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); | 201 | intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); | |
202 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ | 202 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ | |
203 | intel_ring_emit(ring, 0); | 203 | intel_ring_emit(ring, 0); | |
204 | intel_ring_emit(ring, 0); | 204 | intel_ring_emit(ring, 0); | |
205 | intel_ring_emit(ring, MI_NOOP); | 205 | intel_ring_emit(ring, MI_NOOP); | |
206 | intel_ring_advance(ring); | 206 | intel_ring_advance(ring); | |
207 | 207 | |||
208 | return 0; | 208 | return 0; | |
209 | } | 209 | } | |
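
intel_emit_post_sync_nonzero_flush is built entirely from the begin/emit/advance triple that the rest of this file uses: intel_ring_begin reserves a fixed number of dwords (and is where waiting for ring space happens), intel_ring_emit writes one dword at the tail, and intel_ring_advance publishes the new tail. A toy model of that contract, with invented names and a fixed-size buffer instead of the real ring:

#include <stdint.h>

#define RING_DWORDS 64

struct toy_ring {
	uint32_t buf[RING_DWORDS];
	unsigned tail;      /* next dword to write */
	unsigned hw_tail;   /* tail the "hardware" has been told about */
};

static int ring_begin(struct toy_ring *r, unsigned n)
{
	if (r->tail + n > RING_DWORDS)
		return -1;   /* the real code waits for the GPU to drain */
	return 0;
}

static void ring_emit(struct toy_ring *r, uint32_t dw)
{
	r->buf[r->tail++] = dw;
}

static void ring_advance(struct toy_ring *r)
{
	r->hw_tail = r->tail;   /* the real code writes the TAIL register */
}

Note that each reservation in the function above is for six dwords even though GFX_OP_PIPE_CONTROL(5) plus its operands is five; the trailing MI_NOOP fills the sixth slot, keeping the number of emitted dwords even.
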
210 | 210 | |||
211 | static int | 211 | static int | |
212 | gen6_render_ring_flush(struct intel_ring_buffer *ring, | 212 | gen6_render_ring_flush(struct intel_ring_buffer *ring, | |
213 | u32 invalidate_domains, u32 flush_domains) | 213 | u32 invalidate_domains, u32 flush_domains) | |
214 | { | 214 | { | |
215 | u32 flags = 0; | 215 | u32 flags = 0; | |
216 | struct pipe_control *pc = ring->private; | 216 | struct pipe_control *pc = ring->private; | |
217 | u32 scratch_addr = pc->gtt_offset + 128; | 217 | u32 scratch_addr = pc->gtt_offset + 128; | |
218 | int ret; | 218 | int ret; | |
219 | 219 | |||
220 | /* Force SNB workarounds for PIPE_CONTROL flushes */ | 220 | /* Force SNB workarounds for PIPE_CONTROL flushes */ | |
221 | ret = intel_emit_post_sync_nonzero_flush(ring); | 221 | ret = intel_emit_post_sync_nonzero_flush(ring); | |
222 | if (ret) | 222 | if (ret) | |
223 | return ret; | 223 | return ret; | |
224 | 224 | |||
225 | /* Just flush everything. Experiments have shown that reducing the | 225 | /* Just flush everything. Experiments have shown that reducing the | |
226 | * number of bits based on the write domains has little performance | 226 | * number of bits based on the write domains has little performance | |
227 | * impact. | 227 | * impact. | |
228 | */ | 228 | */ | |
229 | if (flush_domains) { | 229 | if (flush_domains) { | |
230 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | 230 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | |
231 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | 231 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | |
232 | /* | 232 | /* | |
233 | * Ensure that any following seqno writes only happen | 233 | * Ensure that any following seqno writes only happen | |
234 | * when the render cache is indeed flushed. | 234 | * when the render cache is indeed flushed. | |
235 | */ | 235 | */ | |
236 | flags |= PIPE_CONTROL_CS_STALL; | 236 | flags |= PIPE_CONTROL_CS_STALL; | |
237 | } | 237 | } | |
238 | if (invalidate_domains) { | 238 | if (invalidate_domains) { | |
239 | flags |= PIPE_CONTROL_TLB_INVALIDATE; | 239 | flags |= PIPE_CONTROL_TLB_INVALIDATE; | |
240 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | 240 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | |
241 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | 241 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | |
242 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | 242 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | |
243 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | 243 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | |
244 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | 244 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | |
245 | /* | 245 | /* | |
246 | * TLB invalidate requires a post-sync write. | 246 | * TLB invalidate requires a post-sync write. | |
247 | */ | 247 | */ | |
248 | flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; | 248 | flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; | |
249 | } | 249 | } | |
250 | 250 | |||
251 | ret = intel_ring_begin(ring, 4); | 251 | ret = intel_ring_begin(ring, 4); | |
252 | if (ret) | 252 | if (ret) | |
253 | return ret; | 253 | return ret; | |
254 | 254 | |||
255 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | 255 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | |
256 | intel_ring_emit(ring, flags); | 256 | intel_ring_emit(ring, flags); | |
257 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); | 257 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); | |
258 | intel_ring_emit(ring, 0); | 258 | intel_ring_emit(ring, 0); | |
259 | intel_ring_advance(ring); | 259 | intel_ring_advance(ring); | |
260 | 260 | |||
261 | return 0; | 261 | return 0; | |
262 | } | 262 | } | |
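
For orientation, GFX_OP_PIPE_CONTROL(4) names a four-dword command, and the
four intel_ring_emit() calls that follow it fill exactly those slots.  A
minimal standalone sketch of that layout (slot names here are descriptive,
not hardware register definitions):

        #include <stdint.h>
        #include <stdio.h>

        /* The 4-dword PIPE_CONTROL shape emitted above. */
        struct pipe_control_cmd {
                uint32_t header;        /* GFX_OP_PIPE_CONTROL(4) */
                uint32_t flags;         /* flush/invalidate/stall bits */
                uint32_t address;       /* scratch_addr | PIPE_CONTROL_GLOBAL_GTT */
                uint32_t data;          /* post-sync write payload (0 here) */
        };

        int main(void)
        {
                printf("%zu dwords\n", sizeof(struct pipe_control_cmd) / 4);
                return 0;
        }

The gen7 functions below emit the same four-dword shape; only the flag
composition differs.
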
263 | 263 | |||
264 | static int | 264 | static int | |
265 | gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) | 265 | gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) | |
266 | { | 266 | { | |
267 | int ret; | 267 | int ret; | |
268 | 268 | |||
269 | ret = intel_ring_begin(ring, 4); | 269 | ret = intel_ring_begin(ring, 4); | |
270 | if (ret) | 270 | if (ret) | |
271 | return ret; | 271 | return ret; | |
272 | 272 | |||
273 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | 273 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | |
274 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | | 274 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | | |
275 | PIPE_CONTROL_STALL_AT_SCOREBOARD); | 275 | PIPE_CONTROL_STALL_AT_SCOREBOARD); | |
276 | intel_ring_emit(ring, 0); | 276 | intel_ring_emit(ring, 0); | |
277 | intel_ring_emit(ring, 0); | 277 | intel_ring_emit(ring, 0); | |
278 | intel_ring_advance(ring); | 278 | intel_ring_advance(ring); | |
279 | 279 | |||
280 | return 0; | 280 | return 0; | |
281 | } | 281 | } | |
282 | 282 | |||
283 | static int | 283 | static int | |
284 | gen7_render_ring_flush(struct intel_ring_buffer *ring, | 284 | gen7_render_ring_flush(struct intel_ring_buffer *ring, | |
285 | u32 invalidate_domains, u32 flush_domains) | 285 | u32 invalidate_domains, u32 flush_domains) | |
286 | { | 286 | { | |
287 | u32 flags = 0; | 287 | u32 flags = 0; | |
288 | struct pipe_control *pc = ring->private; | 288 | struct pipe_control *pc = ring->private; | |
289 | u32 scratch_addr = pc->gtt_offset + 128; | 289 | u32 scratch_addr = pc->gtt_offset + 128; | |
290 | int ret; | 290 | int ret; | |
291 | 291 | |||
292 | /* | 292 | /* | |
293 | * Ensure that any following seqno writes only happen when the render | 293 | * Ensure that any following seqno writes only happen when the render | |
294 | * cache is indeed flushed. | 294 | * cache is indeed flushed. | |
295 | * | 295 | * | |
296 | * Workaround: 4th PIPE_CONTROL command (except the ones with only | 296 | * Workaround: 4th PIPE_CONTROL command (except the ones with only | |
297 | * read-cache invalidate bits set) must have the CS_STALL bit set. We | 297 | * read-cache invalidate bits set) must have the CS_STALL bit set. We | |
298 | * don't try to be clever and just set it unconditionally. | 298 | * don't try to be clever and just set it unconditionally. | |
299 | */ | 299 | */ | |
300 | flags |= PIPE_CONTROL_CS_STALL; | 300 | flags |= PIPE_CONTROL_CS_STALL; | |
301 | 301 | |||
302 | /* Just flush everything. Experiments have shown that reducing the | 302 | /* Just flush everything. Experiments have shown that reducing the | |
303 | * number of bits based on the write domains has little performance | 303 | * number of bits based on the write domains has little performance | |
304 | * impact. | 304 | * impact. | |
305 | */ | 305 | */ | |
306 | if (flush_domains) { | 306 | if (flush_domains) { | |
307 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | 307 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | |
308 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | 308 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | |
309 | } | 309 | } | |
310 | if (invalidate_domains) { | 310 | if (invalidate_domains) { | |
311 | flags |= PIPE_CONTROL_TLB_INVALIDATE; | 311 | flags |= PIPE_CONTROL_TLB_INVALIDATE; | |
312 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | 312 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | |
313 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | 313 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | |
314 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | 314 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | |
315 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | 315 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | |
316 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | 316 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | |
317 | /* | 317 | /* | |
318 | * TLB invalidate requires a post-sync write. | 318 | * TLB invalidate requires a post-sync write. | |
319 | */ | 319 | */ | |
320 | flags |= PIPE_CONTROL_QW_WRITE; | 320 | flags |= PIPE_CONTROL_QW_WRITE; | |
321 | 321 | |||
322 | /* Workaround: we must issue a pipe_control with CS-stall bit | 322 | /* Workaround: we must issue a pipe_control with CS-stall bit | |
323 | * set before a pipe_control command that has the state cache | 323 | * set before a pipe_control command that has the state cache | |
324 | * invalidate bit set. */ | 324 | * invalidate bit set. */ | |
325 | gen7_render_ring_cs_stall_wa(ring); | 325 | gen7_render_ring_cs_stall_wa(ring); | |
326 | } | 326 | } | |
327 | 327 | |||
328 | ret = intel_ring_begin(ring, 4); | 328 | ret = intel_ring_begin(ring, 4); | |
329 | if (ret) | 329 | if (ret) | |
330 | return ret; | 330 | return ret; | |
331 | 331 | |||
332 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | 332 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); | |
333 | intel_ring_emit(ring, flags); | 333 | intel_ring_emit(ring, flags); | |
334 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); | 334 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); | |
335 | intel_ring_emit(ring, 0); | 335 | intel_ring_emit(ring, 0); | |
336 | intel_ring_advance(ring); | 336 | intel_ring_advance(ring); | |
337 | 337 | |||
338 | return 0; | 338 | return 0; | |
339 | } | 339 | } | |
340 | 340 | |||
341 | static void ring_write_tail(struct intel_ring_buffer *ring, | 341 | static void ring_write_tail(struct intel_ring_buffer *ring, | |
342 | u32 value) | 342 | u32 value) | |
343 | { | 343 | { | |
344 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 344 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | |
345 | I915_WRITE_TAIL(ring, value); | 345 | I915_WRITE_TAIL(ring, value); | |
346 | } | 346 | } | |
347 | 347 | |||
348 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) | 348 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) | |
349 | { | 349 | { | |
350 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 350 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | |
351 | u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? | 351 | u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? | |
352 | RING_ACTHD(ring->mmio_base) : ACTHD; | 352 | RING_ACTHD(ring->mmio_base) : ACTHD; | |
353 | 353 | |||
354 | return I915_READ(acthd_reg); | 354 | return I915_READ(acthd_reg); | |
355 | } | 355 | } | |
356 | 356 | |||
357 | static int init_ring_common(struct intel_ring_buffer *ring) | 357 | static int init_ring_common(struct intel_ring_buffer *ring) | |
358 | { | 358 | { | |
359 | struct drm_device *dev = ring->dev; | 359 | struct drm_device *dev = ring->dev; | |
360 | drm_i915_private_t *dev_priv = dev->dev_private; | 360 | drm_i915_private_t *dev_priv = dev->dev_private; | |
361 | struct drm_i915_gem_object *obj = ring->obj; | 361 | struct drm_i915_gem_object *obj = ring->obj; | |
362 | int ret = 0; | 362 | int ret = 0; | |
363 | u32 head; | 363 | u32 head; | |
364 | 364 | |||
365 | if (HAS_FORCE_WAKE(dev)) | 365 | if (HAS_FORCE_WAKE(dev)) | |
366 | gen6_gt_force_wake_get(dev_priv); | 366 | gen6_gt_force_wake_get(dev_priv); | |
367 | 367 | |||
368 | /* Stop the ring if it's running. */ | 368 | /* Stop the ring if it's running. */ | |
369 | I915_WRITE_CTL(ring, 0); | 369 | I915_WRITE_CTL(ring, 0); | |
370 | I915_WRITE_HEAD(ring, 0); | 370 | I915_WRITE_HEAD(ring, 0); | |
371 | ring->write_tail(ring, 0); | 371 | ring->write_tail(ring, 0); | |
372 | 372 | |||
373 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 373 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | |
374 | 374 | |||
375 | /* G45 ring initialization fails to reset head to zero */ | 375 | /* G45 ring initialization fails to reset head to zero */ | |
376 | if (head != 0) { | 376 | if (head != 0) { | |
377 | DRM_DEBUG_KMS("%s head not reset to zero " | 377 | DRM_DEBUG_KMS("%s head not reset to zero " | |
378 | "ctl %08x head %08x tail %08x start %08x\n", | 378 | "ctl %08x head %08x tail %08x start %08x\n", | |
379 | ring->name, | 379 | ring->name, | |
380 | I915_READ_CTL(ring), | 380 | I915_READ_CTL(ring), | |
381 | I915_READ_HEAD(ring), | 381 | I915_READ_HEAD(ring), | |
382 | I915_READ_TAIL(ring), | 382 | I915_READ_TAIL(ring), | |
383 | I915_READ_START(ring)); | 383 | I915_READ_START(ring)); | |
384 | 384 | |||
385 | I915_WRITE_HEAD(ring, 0); | 385 | I915_WRITE_HEAD(ring, 0); | |
386 | 386 | |||
387 | if (I915_READ_HEAD(ring) & HEAD_ADDR) { | 387 | if (I915_READ_HEAD(ring) & HEAD_ADDR) { | |
388 | DRM_ERROR("failed to set %s head to zero " | 388 | DRM_ERROR("failed to set %s head to zero " | |
389 | "ctl %08x head %08x tail %08x start %08x\n", | 389 | "ctl %08x head %08x tail %08x start %08x\n", | |
390 | ring->name, | 390 | ring->name, | |
391 | I915_READ_CTL(ring), | 391 | I915_READ_CTL(ring), | |
392 | I915_READ_HEAD(ring), | 392 | I915_READ_HEAD(ring), | |
393 | I915_READ_TAIL(ring), | 393 | I915_READ_TAIL(ring), | |
394 | I915_READ_START(ring)); | 394 | I915_READ_START(ring)); | |
395 | } | 395 | } | |
396 | } | 396 | } | |
397 | 397 | |||
398 | /* Initialize the ring. This must happen _after_ we've cleared the ring | 398 | /* Initialize the ring. This must happen _after_ we've cleared the ring | |
399 | * registers with the above sequence (the readback of the HEAD registers | 399 | * registers with the above sequence (the readback of the HEAD registers | |
400 | * also enforces ordering), otherwise the hw might lose the new ring | 400 | * also enforces ordering), otherwise the hw might lose the new ring | |
401 | * register values. */ | 401 | * register values. */ | |
402 | I915_WRITE_START(ring, obj->gtt_offset); | 402 | I915_WRITE_START(ring, obj->gtt_offset); | |
403 | I915_WRITE_CTL(ring, | 403 | I915_WRITE_CTL(ring, | |
404 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | 404 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | |
405 | | RING_VALID); | 405 | | RING_VALID); | |
406 | 406 | |||
407 | /* If the head is still not zero, the ring is dead */ | 407 | /* If the head is still not zero, the ring is dead */ | |
408 | if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && | 408 | if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && | |
409 | I915_READ_START(ring) == obj->gtt_offset && | 409 | I915_READ_START(ring) == obj->gtt_offset && | |
410 | (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { | 410 | (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { | |
411 | DRM_ERROR("%s initialization failed " | 411 | DRM_ERROR("%s initialization failed " | |
412 | "ctl %08x head %08x tail %08x start %08x\n", | 412 | "ctl %08x head %08x tail %08x start %08x\n", | |
413 | ring->name, | 413 | ring->name, | |
414 | I915_READ_CTL(ring), | 414 | I915_READ_CTL(ring), | |
415 | I915_READ_HEAD(ring), | 415 | I915_READ_HEAD(ring), | |
416 | I915_READ_TAIL(ring), | 416 | I915_READ_TAIL(ring), | |
417 | I915_READ_START(ring)); | 417 | I915_READ_START(ring)); | |
418 | ret = -EIO; | 418 | ret = -EIO; | |
419 | goto out; | 419 | goto out; | |
420 | } | 420 | } | |
421 | 421 | |||
422 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) | 422 | if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) | |
423 | i915_kernel_lost_context(ring->dev); | 423 | i915_kernel_lost_context(ring->dev); | |
424 | else { | 424 | else { | |
425 | ring->head = I915_READ_HEAD(ring); | 425 | ring->head = I915_READ_HEAD(ring); | |
426 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | 426 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | |
427 | ring->space = ring_space(ring); | 427 | ring->space = ring_space(ring); | |
428 | ring->last_retired_head = -1; | 428 | ring->last_retired_head = -1; | |
429 | } | 429 | } | |
430 | 430 | |||
431 | out: | 431 | out: | |
432 | if (HAS_FORCE_WAKE(dev)) | 432 | if (HAS_FORCE_WAKE(dev)) | |
433 | gen6_gt_force_wake_put(dev_priv); | 433 | gen6_gt_force_wake_put(dev_priv); | |
434 | 434 | |||
435 | return ret; | 435 | return ret; | |
436 | } | 436 | } | |
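
The wait_for() used above is i915's polling helper: it re-evaluates a
condition until it holds or a millisecond deadline passes, returning
nonzero on timeout, which is why a true result here means the ring is
dead.  A rough userspace model of the pattern, assuming only the
0-on-success / -ETIMEDOUT convention visible at this call site (the real
helper also sleeps between polls):

        #include <errno.h>
        #include <stdbool.h>
        #include <stddef.h>
        #include <time.h>

        static long now_ms(void)
        {
                struct timespec ts;

                clock_gettime(CLOCK_MONOTONIC, &ts);
                return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
        }

        /* Poll cond(arg) until true or until ms milliseconds elapse. */
        static int wait_for_cond(bool (*cond)(void *), void *arg, long ms)
        {
                long deadline = now_ms() + ms;

                while (!cond(arg)) {
                        if (now_ms() > deadline)
                                return -ETIMEDOUT;
                }
                return 0;
        }

        static bool ring_ready(void *arg)
        {
                (void)arg;
                return true;    /* stand-in for the CTL/START/HEAD checks */
        }

        int main(void)
        {
                return wait_for_cond(ring_ready, NULL, 50) ? 1 : 0;
        }
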
437 | 437 | |||
438 | static int | 438 | static int | |
439 | init_pipe_control(struct intel_ring_buffer *ring) | 439 | init_pipe_control(struct intel_ring_buffer *ring) | |
440 | { | 440 | { | |
441 | struct pipe_control *pc; | 441 | struct pipe_control *pc; | |
442 | struct drm_i915_gem_object *obj; | 442 | struct drm_i915_gem_object *obj; | |
443 | int ret; | 443 | int ret; | |
444 | 444 | |||
445 | if (ring->private) | 445 | if (ring->private) | |
446 | return 0; | 446 | return 0; | |
447 | 447 | |||
448 | pc = kmalloc(sizeof(*pc), GFP_KERNEL); | 448 | pc = kmalloc(sizeof(*pc), GFP_KERNEL); | |
449 | if (!pc) | 449 | if (!pc) | |
450 | return -ENOMEM; | 450 | return -ENOMEM; | |
451 | 451 | |||
452 | obj = i915_gem_alloc_object(ring->dev, 4096); | 452 | obj = i915_gem_alloc_object(ring->dev, 4096); | |
453 | if (obj == NULL) { | 453 | if (obj == NULL) { | |
454 | DRM_ERROR("Failed to allocate seqno page\n"); | 454 | DRM_ERROR("Failed to allocate seqno page\n"); | |
455 | ret = -ENOMEM; | 455 | ret = -ENOMEM; | |
456 | goto err; | 456 | goto err; | |
457 | } | 457 | } | |
458 | 458 | |||
459 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | 459 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | |
460 | 460 | |||
461 | ret = i915_gem_object_pin(obj, 4096, true, false); | 461 | ret = i915_gem_object_pin(obj, 4096, true, false); | |
462 | if (ret) | 462 | if (ret) | |
463 | goto err_unref; | 463 | goto err_unref; | |
464 | 464 | |||
465 | pc->gtt_offset = obj->gtt_offset; | 465 | pc->gtt_offset = obj->gtt_offset; | |
466 | pc->cpu_page = kmap(sg_page(obj->pages->sgl)); | 466 | pc->cpu_page = kmap(sg_page(obj->pages->sgl)); | |
467 | if (pc->cpu_page == NULL) | 467 | if (pc->cpu_page == NULL) | |
468 | goto err_unpin; | 468 | goto err_unpin; | |
469 | 469 | |||
470 | pc->obj = obj; | 470 | pc->obj = obj; | |
471 | ring->private = pc; | 471 | ring->private = pc; | |
472 | return 0; | 472 | return 0; | |
473 | 473 | |||
474 | err_unpin: | 474 | err_unpin: | |
475 | i915_gem_object_unpin(obj); | 475 | i915_gem_object_unpin(obj); | |
476 | err_unref: | 476 | err_unref: | |
477 | drm_gem_object_unreference(&obj->base); | 477 | drm_gem_object_unreference(&obj->base); | |
478 | err: | 478 | err: | |
479 | kfree(pc); | 479 | kfree(pc); | |
480 | return ret; | 480 | return ret; | |
481 | } | 481 | } | |
482 | 482 | |||
483 | static void | 483 | static void | |
484 | cleanup_pipe_control(struct intel_ring_buffer *ring) | 484 | cleanup_pipe_control(struct intel_ring_buffer *ring) | |
485 | { | 485 | { | |
486 | struct pipe_control *pc = ring->private; | 486 | struct pipe_control *pc = ring->private; | |
487 | struct drm_i915_gem_object *obj; | 487 | struct drm_i915_gem_object *obj; | |
488 | 488 | |||
489 | if (!ring->private) | 489 | if (!ring->private) | |
490 | return; | 490 | return; | |
491 | 491 | |||
492 | obj = pc->obj; | 492 | obj = pc->obj; | |
493 | 493 | |||
494 | kunmap(sg_page(obj->pages->sgl)); | 494 | kunmap(sg_page(obj->pages->sgl)); | |
495 | i915_gem_object_unpin(obj); | 495 | i915_gem_object_unpin(obj); | |
496 | drm_gem_object_unreference(&obj->base); | 496 | drm_gem_object_unreference(&obj->base); | |
497 | 497 | |||
498 | kfree(pc); | 498 | kfree(pc); | |
499 | ring->private = NULL; | 499 | ring->private = NULL; | |
500 | } | 500 | } | |
501 | 501 | |||
502 | static int init_render_ring(struct intel_ring_buffer *ring) | 502 | static int init_render_ring(struct intel_ring_buffer *ring) | |
503 | { | 503 | { | |
504 | struct drm_device *dev = ring->dev; | 504 | struct drm_device *dev = ring->dev; | |
505 | struct drm_i915_private *dev_priv = dev->dev_private; | 505 | struct drm_i915_private *dev_priv = dev->dev_private; | |
506 | int ret = init_ring_common(ring); | 506 | int ret = init_ring_common(ring); | |
507 | 507 | |||
508 | if (INTEL_INFO(dev)->gen > 3) | 508 | if (INTEL_INFO(dev)->gen > 3) | |
509 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); | 509 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); | |
510 | 510 | |||
511 | /* We need to disable the AsyncFlip performance optimisations in order | 511 | /* We need to disable the AsyncFlip performance optimisations in order | |
512 | * to use MI_WAIT_FOR_EVENT within the CS. It should already be | 512 | * to use MI_WAIT_FOR_EVENT within the CS. It should already be | |
513 | * programmed to '1' on all products. | 513 | * programmed to '1' on all products. | |
514 | */ | 514 | */ | |
515 | if (INTEL_INFO(dev)->gen >= 6) | 515 | if (INTEL_INFO(dev)->gen >= 6) | |
516 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); | 516 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); | |
517 | 517 | |||
518 | /* Required for the hardware to program scanline values for waiting */ | 518 | /* Required for the hardware to program scanline values for waiting */ | |
519 | if (INTEL_INFO(dev)->gen == 6) | 519 | if (INTEL_INFO(dev)->gen == 6) | |
520 | I915_WRITE(GFX_MODE, | 520 | I915_WRITE(GFX_MODE, | |
521 | _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); | 521 | _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); | |
522 | 522 | |||
523 | if (IS_GEN7(dev)) | 523 | if (IS_GEN7(dev)) | |
524 | I915_WRITE(GFX_MODE_GEN7, | 524 | I915_WRITE(GFX_MODE_GEN7, | |
525 | _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | | 525 | _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | | |
526 | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); | 526 | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); | |
527 | 527 | |||
528 | if (INTEL_INFO(dev)->gen >= 5) { | 528 | if (INTEL_INFO(dev)->gen >= 5) { | |
529 | ret = init_pipe_control(ring); | 529 | ret = init_pipe_control(ring); | |
530 | if (ret) | 530 | if (ret) | |
531 | return ret; | 531 | return ret; | |
532 | } | 532 | } | |
533 | 533 | |||
534 | if (IS_GEN6(dev)) { | 534 | if (IS_GEN6(dev)) { | |
535 | /* From the Sandybridge PRM, volume 1 part 3, page 24: | 535 | /* From the Sandybridge PRM, volume 1 part 3, page 24: | |
536 | * "If this bit is set, STCunit will have LRA as replacement | 536 | * "If this bit is set, STCunit will have LRA as replacement | |
537 | * policy. [...] This bit must be reset. LRA replacement | 537 | * policy. [...] This bit must be reset. LRA replacement | |
538 | * policy is not supported." | 538 | * policy is not supported." | |
539 | */ | 539 | */ | |
540 | I915_WRITE(CACHE_MODE_0, | 540 | I915_WRITE(CACHE_MODE_0, | |
541 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | 541 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); | |
542 | 542 | |||
543 | /* This is not explicitly set for GEN6, so read the register. | 543 | /* This is not explicitly set for GEN6, so read the register. | |
544 | * see intel_ring_mi_set_context() for why we care. | 544 | * see intel_ring_mi_set_context() for why we care. | |
545 | * TODO: consider explicitly setting the bit for GEN5 | 545 | * TODO: consider explicitly setting the bit for GEN5 | |
546 | */ | 546 | */ | |
547 | ring->itlb_before_ctx_switch = | 547 | ring->itlb_before_ctx_switch = | |
548 | !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); | 548 | !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); | |
549 | } | 549 | } | |
550 | 550 | |||
551 | if (INTEL_INFO(dev)->gen >= 6) | 551 | if (INTEL_INFO(dev)->gen >= 6) | |
552 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); | 552 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); | |
553 | 553 | |||
554 | if (HAS_L3_GPU_CACHE(dev)) | 554 | if (HAS_L3_GPU_CACHE(dev)) | |
555 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); | 555 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); | |
556 | 556 | |||
557 | return ret; | 557 | return ret; | |
558 | } | 558 | } | |
559 | 559 | |||
560 | static void render_ring_cleanup(struct intel_ring_buffer *ring) | 560 | static void render_ring_cleanup(struct intel_ring_buffer *ring) | |
561 | { | 561 | { | |
562 | struct drm_device *dev = ring->dev; | 562 | struct drm_device *dev = ring->dev; | |
563 | 563 | |||
564 | if (!ring->private) | 564 | if (!ring->private) | |
565 | return; | 565 | return; | |
566 | 566 | |||
567 | if (HAS_BROKEN_CS_TLB(dev)) | 567 | if (HAS_BROKEN_CS_TLB(dev)) | |
568 | drm_gem_object_unreference(to_gem_object(ring->private)); | 568 | drm_gem_object_unreference(to_gem_object(ring->private)); | |
569 | 569 | |||
570 | cleanup_pipe_control(ring); | 570 | cleanup_pipe_control(ring); | |
571 | } | 571 | } | |
572 | 572 | |||
573 | static void | 573 | static void | |
574 | update_mboxes(struct intel_ring_buffer *ring, | 574 | update_mboxes(struct intel_ring_buffer *ring, | |
575 | u32 mmio_offset) | 575 | u32 mmio_offset) | |
576 | { | 576 | { | |
577 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | 577 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | |
578 | intel_ring_emit(ring, mmio_offset); | 578 | intel_ring_emit(ring, mmio_offset); | |
579 | intel_ring_emit(ring, ring->outstanding_lazy_request); | 579 | intel_ring_emit(ring, ring->outstanding_lazy_request); | |
580 | } | 580 | } | |
581 | 581 | |||
582 | /** | 582 | /** | |
583 | * gen6_add_request - Update the semaphore mailbox registers | 583 | * gen6_add_request - Update the semaphore mailbox registers | |
584 | * | 584 | * | |
585 | * @ring - ring that is adding a request | 585 | * @ring - ring that is adding a request | |
586 | * @seqno - return seqno stuck into the ring | 586 | * @seqno - return seqno stuck into the ring | |
587 | * | 587 | * | |
588 | * Update the mailbox registers in the *other* rings with the current seqno. | 588 | * Update the mailbox registers in the *other* rings with the current seqno. | |
589 | * This acts like a signal in the canonical semaphore. | 589 | * This acts like a signal in the canonical semaphore. | |
590 | */ | 590 | */ | |
591 | static int | 591 | static int | |
592 | gen6_add_request(struct intel_ring_buffer *ring) | 592 | gen6_add_request(struct intel_ring_buffer *ring) | |
593 | { | 593 | { | |
594 | u32 mbox1_reg; | 594 | u32 mbox1_reg; | |
595 | u32 mbox2_reg; | 595 | u32 mbox2_reg; | |
596 | int ret; | 596 | int ret; | |
597 | 597 | |||
598 | ret = intel_ring_begin(ring, 10); | 598 | ret = intel_ring_begin(ring, 10); | |
599 | if (ret) | 599 | if (ret) | |
600 | return ret; | 600 | return ret; | |
601 | 601 | |||
602 | mbox1_reg = ring->signal_mbox[0]; | 602 | mbox1_reg = ring->signal_mbox[0]; | |
603 | mbox2_reg = ring->signal_mbox[1]; | 603 | mbox2_reg = ring->signal_mbox[1]; | |
604 | 604 | |||
605 | update_mboxes(ring, mbox1_reg); | 605 | update_mboxes(ring, mbox1_reg); | |
606 | update_mboxes(ring, mbox2_reg); | 606 | update_mboxes(ring, mbox2_reg); | |
607 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 607 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | |
608 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 608 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | |
609 | intel_ring_emit(ring, ring->outstanding_lazy_request); | 609 | intel_ring_emit(ring, ring->outstanding_lazy_request); | |
610 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 610 | intel_ring_emit(ring, MI_USER_INTERRUPT); | |
611 | intel_ring_advance(ring); | 611 | intel_ring_advance(ring); | |
612 | 612 | |||
613 | return 0; | 613 | return 0; | |
614 | } | 614 | } | |
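
The intel_ring_begin(ring, 10) budget above is exact: each update_mboxes()
call emits three dwords (the MI_LOAD_REGISTER_IMM header, the mailbox
register offset, and the seqno), and the store-index/interrupt tail emits
four more.  A trivial standalone check of that arithmetic:

        #include <assert.h>

        int main(void)
        {
                int mbox_dwords = 3;    /* LRI header + register + seqno */
                int tail_dwords = 4;    /* store-dword pair + seqno + interrupt */

                assert(2 * mbox_dwords + tail_dwords == 10);
                return 0;
        }
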
615 | 615 | |||
616 | /** | 616 | /** | |
617 | * intel_ring_sync - sync the waiter to the signaller on seqno | 617 | * intel_ring_sync - sync the waiter to the signaller on seqno | |
618 | * | 618 | * | |
619 | * @waiter - ring that is waiting | 619 | * @waiter - ring that is waiting | |
620 | * @signaller - ring which has, or will signal | 620 | * @signaller - ring which has, or will signal | |
621 | * @seqno - seqno which the waiter will block on | 621 | * @seqno - seqno which the waiter will block on | |
622 | */ | 622 | */ | |
623 | static int | 623 | static int | |
624 | gen6_ring_sync(struct intel_ring_buffer *waiter, | 624 | gen6_ring_sync(struct intel_ring_buffer *waiter, | |
625 | struct intel_ring_buffer *signaller, | 625 | struct intel_ring_buffer *signaller, | |
626 | u32 seqno) | 626 | u32 seqno) | |
627 | { | 627 | { | |
628 | int ret; | 628 | int ret; | |
629 | u32 dw1 = MI_SEMAPHORE_MBOX | | 629 | u32 dw1 = MI_SEMAPHORE_MBOX | | |
630 | MI_SEMAPHORE_COMPARE | | 630 | MI_SEMAPHORE_COMPARE | | |
631 | MI_SEMAPHORE_REGISTER; | 631 | MI_SEMAPHORE_REGISTER; | |
632 | 632 | |||
633 | /* Throughout all of the GEM code, seqno passed implies our current | 633 | /* Throughout all of the GEM code, seqno passed implies our current | |
634 | * seqno is >= the last seqno executed. However for hardware the | 634 | * seqno is >= the last seqno executed. However for hardware the | |
635 | * comparison is strictly greater than. | 635 | * comparison is strictly greater than. | |
636 | */ | 636 | */ | |
637 | seqno -= 1; | 637 | seqno -= 1; | |
638 | 638 | |||
639 | WARN_ON(signaller->semaphore_register[waiter->id] == | 639 | WARN_ON(signaller->semaphore_register[waiter->id] == | |
640 | MI_SEMAPHORE_SYNC_INVALID); | 640 | MI_SEMAPHORE_SYNC_INVALID); | |
641 | 641 | |||
642 | ret = intel_ring_begin(waiter, 4); | 642 | ret = intel_ring_begin(waiter, 4); | |
643 | if (ret) | 643 | if (ret) | |
644 | return ret; | 644 | return ret; | |
645 | 645 | |||
646 | intel_ring_emit(waiter, | 646 | intel_ring_emit(waiter, | |
647 | dw1 | signaller->semaphore_register[waiter->id]); | 647 | dw1 | signaller->semaphore_register[waiter->id]); | |
648 | intel_ring_emit(waiter, seqno); | 648 | intel_ring_emit(waiter, seqno); | |
649 | intel_ring_emit(waiter, 0); | 649 | intel_ring_emit(waiter, 0); | |
650 | intel_ring_emit(waiter, MI_NOOP); | 650 | intel_ring_emit(waiter, MI_NOOP); | |
651 | intel_ring_advance(waiter); | 651 | intel_ring_advance(waiter); | |
652 | 652 | |||
653 | return 0; | 653 | return 0; | |
654 | } | 654 | } | |
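
The seqno -= 1 above deserves a second look: the semaphore hardware does a
strictly-greater-than compare, so emitting target - 1 turns it into the
greater-or-equal test the GEM code assumes.  A minimal standalone
illustration (hw_semaphore_passed is a made-up name for the hardware
compare):

        #include <stdint.h>
        #include <stdio.h>

        /* Hardware semantics: strictly greater than the emitted value. */
        static int hw_semaphore_passed(uint32_t current_seqno, uint32_t wait_value)
        {
                return current_seqno > wait_value;
        }

        int main(void)
        {
                uint32_t target = 42;   /* seqno the waiter needs executed */

                /* Emitting target - 1 makes the strict compare act as >=. */
                printf("%d\n", hw_semaphore_passed(42, target - 1));    /* 1 */
                printf("%d\n", hw_semaphore_passed(41, target - 1));    /* 0 */
                return 0;
        }
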
655 | 655 | |||
656 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ | 656 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ | |
657 | do { \ | 657 | do { \ | |
658 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ | 658 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ | |
659 | PIPE_CONTROL_DEPTH_STALL); \ | 659 | PIPE_CONTROL_DEPTH_STALL); \ | |
660 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ | 660 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ | |
661 | intel_ring_emit(ring__, 0); \ | 661 | intel_ring_emit(ring__, 0); \ | |
662 | intel_ring_emit(ring__, 0); \ | 662 | intel_ring_emit(ring__, 0); \ | |
663 | } while (0) | 663 | } while (0) | |
664 | 664 | |||
665 | static int | 665 | static int | |
666 | pc_render_add_request(struct intel_ring_buffer *ring) | 666 | pc_render_add_request(struct intel_ring_buffer *ring) | |
667 | { | 667 | { | |
668 | struct pipe_control *pc = ring->private; | 668 | struct pipe_control *pc = ring->private; | |
669 | u32 scratch_addr = pc->gtt_offset + 128; | 669 | u32 scratch_addr = pc->gtt_offset + 128; | |
670 | int ret; | 670 | int ret; | |
671 | 671 | |||
672 | /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently | 672 | /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently | |
673 | * incoherent with writes to memory, i.e. completely fubar, | 673 | * incoherent with writes to memory, i.e. completely fubar, | |
674 | * so we need to use PIPE_NOTIFY instead. | 674 | * so we need to use PIPE_NOTIFY instead. | |
675 | * | 675 | * | |
676 | * However, we also need to work around the qword write | 676 | * However, we also need to work around the qword write | |
677 | * incoherence by flushing the 6 PIPE_NOTIFY buffers out to | 677 | * incoherence by flushing the 6 PIPE_NOTIFY buffers out to | |
678 | * memory before requesting an interrupt. | 678 | * memory before requesting an interrupt. | |
679 | */ | 679 | */ | |
680 | ret = intel_ring_begin(ring, 32); | 680 | ret = intel_ring_begin(ring, 32); | |
681 | if (ret) | 681 | if (ret) | |
682 | return ret; | 682 | return ret; | |
683 | 683 | |||
684 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | | 684 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | | |
685 | PIPE_CONTROL_WRITE_FLUSH | | 685 | PIPE_CONTROL_WRITE_FLUSH | | |
686 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); | 686 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); | |
687 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 687 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | |
688 | intel_ring_emit(ring, ring->outstanding_lazy_request); | 688 | intel_ring_emit(ring, ring->outstanding_lazy_request); | |
689 | intel_ring_emit(ring, 0); | 689 | intel_ring_emit(ring, 0); | |
690 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 690 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | |
691 | scratch_addr += 128; /* write to separate cachelines */ | 691 | scratch_addr += 128; /* write to separate cachelines */ | |
692 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 692 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | |
693 | scratch_addr += 128; | 693 | scratch_addr += 128; | |
694 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 694 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | |
695 | scratch_addr += 128; | 695 | scratch_addr += 128; | |
696 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 696 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | |
697 | scratch_addr += 128; | 697 | scratch_addr += 128; | |
698 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 698 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | |
699 | scratch_addr += 128; | 699 | scratch_addr += 128; | |
700 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 700 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | |
701 | 701 | |||
702 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | | 702 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | | |
703 | PIPE_CONTROL_WRITE_FLUSH | | 703 | PIPE_CONTROL_WRITE_FLUSH | | |
704 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | | 704 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | | |
705 | PIPE_CONTROL_NOTIFY); | 705 | PIPE_CONTROL_NOTIFY); | |
706 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 706 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | |
707 | intel_ring_emit(ring, ring->outstanding_lazy_request); | 707 | intel_ring_emit(ring, ring->outstanding_lazy_request); | |
708 | intel_ring_emit(ring, 0); | 708 | intel_ring_emit(ring, 0); | |
709 | intel_ring_advance(ring); | 709 | intel_ring_advance(ring); | |
710 | 710 | |||
711 | return 0; | 711 | return 0; | |
712 | } | 712 | } | |
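
The stride arithmetic above is what does the flushing: starting at
pc->gtt_offset + 128, the six PIPE_CONTROL_FLUSH writes step 128 bytes
apart (offsets 128 through 768), so each qword lands in its own cacheline
while staying inside the single 4096-byte scratch page.  A throwaway check
of those bounds:

        #include <assert.h>

        int main(void)
        {
                unsigned scratch = 128;         /* pc->gtt_offset + 128 */
                int i;

                for (i = 0; i < 6; i++) {
                        assert(scratch % 128 == 0);     /* its own cacheline */
                        assert(scratch + 8 <= 4096);    /* inside the page */
                        scratch += 128;
                }
                return 0;
        }
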
713 | 713 | |||
714 | static u32 | 714 | static u32 | |
715 | gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | 715 | gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | |
716 | { | 716 | { | |
717 | /* Workaround to force correct ordering between irq and seqno writes on | 717 | /* Workaround to force correct ordering between irq and seqno writes on | |
718 | * ivb (and maybe also on snb) by reading from a CS register (like | 718 | * ivb (and maybe also on snb) by reading from a CS register (like | |
719 | * ACTHD) before reading the status page. */ | 719 | * ACTHD) before reading the status page. */ | |
720 | if (!lazy_coherency) | 720 | if (!lazy_coherency) | |
721 | intel_ring_get_active_head(ring); | 721 | intel_ring_get_active_head(ring); | |
722 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 722 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | |
723 | } | 723 | } | |
724 | 724 | |||
725 | static u32 | 725 | static u32 | |
726 | ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | 726 | ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | |
727 | { | 727 | { | |
728 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | 728 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); | |
729 | } | 729 | } | |
730 | 730 | |||
731 | static u32 | 731 | static u32 | |
732 | pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | 732 | pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) | |
733 | { | 733 | { | |
734 | struct pipe_control *pc = ring->private; | 734 | struct pipe_control *pc = ring->private; | |
735 | return pc->cpu_page[0]; | 735 | return pc->cpu_page[0]; | |
736 | } | 736 | } | |
737 | 737 | |||
738 | static bool | 738 | static bool | |
739 | gen5_ring_get_irq(struct intel_ring_buffer *ring) | 739 | gen5_ring_get_irq(struct intel_ring_buffer *ring) | |
740 | { | 740 | { | |
741 | struct drm_device *dev = ring->dev; | 741 | struct drm_device *dev = ring->dev; | |
742 | drm_i915_private_t *dev_priv = dev->dev_private; | 742 | drm_i915_private_t *dev_priv = dev->dev_private; | |
743 | unsigned long flags; | 743 | unsigned long flags; | |
744 | 744 | |||
745 | if (!dev->irq_enabled) | 745 | if (!dev->irq_enabled) | |
746 | return false; | 746 | return false; | |
747 | 747 | |||
748 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 748 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
749 | if (ring->irq_refcount++ == 0) { | 749 | if (ring->irq_refcount++ == 0) { | |
750 | dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; | 750 | dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; | |
751 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 751 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
752 | POSTING_READ(GTIMR); | 752 | POSTING_READ(GTIMR); | |
753 | } | 753 | } | |
754 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 754 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
755 | 755 | |||
756 | return true; | 756 | return true; | |
757 | } | 757 | } | |
758 | 758 | |||
759 | static void | 759 | static void | |
760 | gen5_ring_put_irq(struct intel_ring_buffer *ring) | 760 | gen5_ring_put_irq(struct intel_ring_buffer *ring) | |
761 | { | 761 | { | |
762 | struct drm_device *dev = ring->dev; | 762 | struct drm_device *dev = ring->dev; | |
763 | drm_i915_private_t *dev_priv = dev->dev_private; | 763 | drm_i915_private_t *dev_priv = dev->dev_private; | |
764 | unsigned long flags; | 764 | unsigned long flags; | |
765 | 765 | |||
766 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 766 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
767 | if (--ring->irq_refcount == 0) { | 767 | if (--ring->irq_refcount == 0) { | |
768 | dev_priv->gt_irq_mask |= ring->irq_enable_mask; | 768 | dev_priv->gt_irq_mask |= ring->irq_enable_mask; | |
769 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 769 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
770 | POSTING_READ(GTIMR); | 770 | POSTING_READ(GTIMR); | |
771 | } | 771 | } | |
772 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 772 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
773 | } | 773 | } | |
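
gen5_ring_get_irq()/gen5_ring_put_irq() above, like the i9xx, i8xx, and
gen6 variants that follow, share one refcounted enable pattern: the
interrupt mask register is rewritten only on the 0 -> 1 and 1 -> 0
transitions, under the irq spinlock, so nested users stack safely.  A
minimal userspace sketch of that pattern (all names illustrative):

        #include <pthread.h>

        struct irq_ref {
                pthread_mutex_t lock;
                int refcount;
                unsigned enabled_mask;  /* stand-in for GTIMR/IMR state */
        };

        static void irq_get(struct irq_ref *r, unsigned mask)
        {
                pthread_mutex_lock(&r->lock);
                if (r->refcount++ == 0)
                        r->enabled_mask |= mask;        /* unmask once */
                pthread_mutex_unlock(&r->lock);
        }

        static void irq_put(struct irq_ref *r, unsigned mask)
        {
                pthread_mutex_lock(&r->lock);
                if (--r->refcount == 0)
                        r->enabled_mask &= ~mask;       /* mask again */
                pthread_mutex_unlock(&r->lock);
        }

        int main(void)
        {
                struct irq_ref r = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

                irq_get(&r, 0x1);       /* 0 -> 1: touches the mask */
                irq_get(&r, 0x1);       /* nested: no mask write */
                irq_put(&r, 0x1);
                irq_put(&r, 0x1);       /* 1 -> 0: masks again */
                return 0;
        }
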
774 | 774 | |||
775 | static bool | 775 | static bool | |
776 | i9xx_ring_get_irq(struct intel_ring_buffer *ring) | 776 | i9xx_ring_get_irq(struct intel_ring_buffer *ring) | |
777 | { | 777 | { | |
778 | struct drm_device *dev = ring->dev; | 778 | struct drm_device *dev = ring->dev; | |
779 | drm_i915_private_t *dev_priv = dev->dev_private; | 779 | drm_i915_private_t *dev_priv = dev->dev_private; | |
780 | unsigned long flags; | 780 | unsigned long flags; | |
781 | 781 | |||
782 | if (!dev->irq_enabled) | 782 | if (!dev->irq_enabled) | |
783 | return false; | 783 | return false; | |
784 | 784 | |||
785 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 785 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
786 | if (ring->irq_refcount++ == 0) { | 786 | if (ring->irq_refcount++ == 0) { | |
787 | dev_priv->irq_mask &= ~ring->irq_enable_mask; | 787 | dev_priv->irq_mask &= ~ring->irq_enable_mask; | |
788 | I915_WRITE(IMR, dev_priv->irq_mask); | 788 | I915_WRITE(IMR, dev_priv->irq_mask); | |
789 | POSTING_READ(IMR); | 789 | POSTING_READ(IMR); | |
790 | } | 790 | } | |
791 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 791 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
792 | 792 | |||
793 | return true; | 793 | return true; | |
794 | } | 794 | } | |
795 | 795 | |||
796 | static void | 796 | static void | |
797 | i9xx_ring_put_irq(struct intel_ring_buffer *ring) | 797 | i9xx_ring_put_irq(struct intel_ring_buffer *ring) | |
798 | { | 798 | { | |
799 | struct drm_device *dev = ring->dev; | 799 | struct drm_device *dev = ring->dev; | |
800 | drm_i915_private_t *dev_priv = dev->dev_private; | 800 | drm_i915_private_t *dev_priv = dev->dev_private; | |
801 | unsigned long flags; | 801 | unsigned long flags; | |
802 | 802 | |||
803 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 803 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
804 | if (--ring->irq_refcount == 0) { | 804 | if (--ring->irq_refcount == 0) { | |
805 | dev_priv->irq_mask |= ring->irq_enable_mask; | 805 | dev_priv->irq_mask |= ring->irq_enable_mask; | |
806 | I915_WRITE(IMR, dev_priv->irq_mask); | 806 | I915_WRITE(IMR, dev_priv->irq_mask); | |
807 | POSTING_READ(IMR); | 807 | POSTING_READ(IMR); | |
808 | } | 808 | } | |
809 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 809 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
810 | } | 810 | } | |
811 | 811 | |||
812 | static bool | 812 | static bool | |
813 | i8xx_ring_get_irq(struct intel_ring_buffer *ring) | 813 | i8xx_ring_get_irq(struct intel_ring_buffer *ring) | |
814 | { | 814 | { | |
815 | struct drm_device *dev = ring->dev; | 815 | struct drm_device *dev = ring->dev; | |
816 | drm_i915_private_t *dev_priv = dev->dev_private; | 816 | drm_i915_private_t *dev_priv = dev->dev_private; | |
817 | unsigned long flags; | 817 | unsigned long flags; | |
818 | 818 | |||
819 | if (!dev->irq_enabled) | 819 | if (!dev->irq_enabled) | |
820 | return false; | 820 | return false; | |
821 | 821 | |||
822 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 822 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
823 | if (ring->irq_refcount++ == 0) { | 823 | if (ring->irq_refcount++ == 0) { | |
824 | dev_priv->irq_mask &= ~ring->irq_enable_mask; | 824 | dev_priv->irq_mask &= ~ring->irq_enable_mask; | |
825 | I915_WRITE16(IMR, dev_priv->irq_mask); | 825 | I915_WRITE16(IMR, dev_priv->irq_mask); | |
826 | POSTING_READ16(IMR); | 826 | POSTING_READ16(IMR); | |
827 | } | 827 | } | |
828 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 828 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
829 | 829 | |||
830 | return true; | 830 | return true; | |
831 | } | 831 | } | |
832 | 832 | |||
833 | static void | 833 | static void | |
834 | i8xx_ring_put_irq(struct intel_ring_buffer *ring) | 834 | i8xx_ring_put_irq(struct intel_ring_buffer *ring) | |
835 | { | 835 | { | |
836 | struct drm_device *dev = ring->dev; | 836 | struct drm_device *dev = ring->dev; | |
837 | drm_i915_private_t *dev_priv = dev->dev_private; | 837 | drm_i915_private_t *dev_priv = dev->dev_private; | |
838 | unsigned long flags; | 838 | unsigned long flags; | |
839 | 839 | |||
840 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 840 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
841 | if (--ring->irq_refcount == 0) { | 841 | if (--ring->irq_refcount == 0) { | |
842 | dev_priv->irq_mask |= ring->irq_enable_mask; | 842 | dev_priv->irq_mask |= ring->irq_enable_mask; | |
843 | I915_WRITE16(IMR, dev_priv->irq_mask); | 843 | I915_WRITE16(IMR, dev_priv->irq_mask); | |
844 | POSTING_READ16(IMR); | 844 | POSTING_READ16(IMR); | |
845 | } | 845 | } | |
846 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 846 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
847 | } | 847 | } | |
848 | 848 | |||
849 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | 849 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring) | |
850 | { | 850 | { | |
851 | struct drm_device *dev = ring->dev; | 851 | struct drm_device *dev = ring->dev; | |
852 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 852 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | |
853 | u32 mmio = 0; | 853 | u32 mmio = 0; | |
854 | 854 | |||
855 | /* The ring status page addresses are no longer next to the rest of | 855 | /* The ring status page addresses are no longer next to the rest of | |
856 | * the ring registers as of gen7. | 856 | * the ring registers as of gen7. | |
857 | */ | 857 | */ | |
858 | if (IS_GEN7(dev)) { | 858 | if (IS_GEN7(dev)) { | |
859 | switch (ring->id) { | 859 | switch (ring->id) { | |
860 | case RCS: | 860 | case RCS: | |
861 | mmio = RENDER_HWS_PGA_GEN7; | 861 | mmio = RENDER_HWS_PGA_GEN7; | |
862 | break; | 862 | break; | |
863 | case BCS: | 863 | case BCS: | |
864 | mmio = BLT_HWS_PGA_GEN7; | 864 | mmio = BLT_HWS_PGA_GEN7; | |
865 | break; | 865 | break; | |
866 | case VCS: | 866 | case VCS: | |
867 | mmio = BSD_HWS_PGA_GEN7; | 867 | mmio = BSD_HWS_PGA_GEN7; | |
868 | break; | 868 | break; | |
869 | } | 869 | } | |
870 | } else if (IS_GEN6(ring->dev)) { | 870 | } else if (IS_GEN6(ring->dev)) { | |
871 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); | 871 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); | |
872 | } else { | 872 | } else { | |
873 | mmio = RING_HWS_PGA(ring->mmio_base); | 873 | mmio = RING_HWS_PGA(ring->mmio_base); | |
874 | } | 874 | } | |
875 | 875 | |||
876 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); | 876 | I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); | |
877 | POSTING_READ(mmio); | 877 | POSTING_READ(mmio); | |
878 | } | 878 | } | |
879 | 879 | |||
880 | static int | 880 | static int | |
881 | bsd_ring_flush(struct intel_ring_buffer *ring, | 881 | bsd_ring_flush(struct intel_ring_buffer *ring, | |
882 | u32 invalidate_domains, | 882 | u32 invalidate_domains, | |
883 | u32 flush_domains) | 883 | u32 flush_domains) | |
884 | { | 884 | { | |
885 | int ret; | 885 | int ret; | |
886 | 886 | |||
887 | ret = intel_ring_begin(ring, 2); | 887 | ret = intel_ring_begin(ring, 2); | |
888 | if (ret) | 888 | if (ret) | |
889 | return ret; | 889 | return ret; | |
890 | 890 | |||
891 | intel_ring_emit(ring, MI_FLUSH); | 891 | intel_ring_emit(ring, MI_FLUSH); | |
892 | intel_ring_emit(ring, MI_NOOP); | 892 | intel_ring_emit(ring, MI_NOOP); | |
893 | intel_ring_advance(ring); | 893 | intel_ring_advance(ring); | |
894 | return 0; | 894 | return 0; | |
895 | } | 895 | } | |
896 | 896 | |||
897 | static int | 897 | static int | |
898 | i9xx_add_request(struct intel_ring_buffer *ring) | 898 | i9xx_add_request(struct intel_ring_buffer *ring) | |
899 | { | 899 | { | |
900 | int ret; | 900 | int ret; | |
901 | 901 | |||
902 | ret = intel_ring_begin(ring, 4); | 902 | ret = intel_ring_begin(ring, 4); | |
903 | if (ret) | 903 | if (ret) | |
904 | return ret; | 904 | return ret; | |
905 | 905 | |||
906 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 906 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | |
907 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 907 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | |
908 | intel_ring_emit(ring, ring->outstanding_lazy_request); | 908 | intel_ring_emit(ring, ring->outstanding_lazy_request); | |
909 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 909 | intel_ring_emit(ring, MI_USER_INTERRUPT); | |
910 | intel_ring_advance(ring); | 910 | intel_ring_advance(ring); | |
911 | 911 | |||
912 | return 0; | 912 | return 0; | |
913 | } | 913 | } | |
914 | 914 | |||
915 | static bool | 915 | static bool | |
916 | gen6_ring_get_irq(struct intel_ring_buffer *ring) | 916 | gen6_ring_get_irq(struct intel_ring_buffer *ring) | |
917 | { | 917 | { | |
918 | struct drm_device *dev = ring->dev; | 918 | struct drm_device *dev = ring->dev; | |
919 | drm_i915_private_t *dev_priv = dev->dev_private; | 919 | drm_i915_private_t *dev_priv = dev->dev_private; | |
920 | unsigned long flags; | 920 | unsigned long flags; | |
921 | 921 | |||
922 | if (!dev->irq_enabled) | 922 | if (!dev->irq_enabled) | |
923 | return false; | 923 | return false; | |
924 | 924 | |||
925 | /* It looks like we need to prevent the gt from suspending while waiting | 925 | /* It looks like we need to prevent the gt from suspending while waiting | |
926 | * for a notify irq, otherwise irqs seem to get lost on at least the | 926 | * for a notify irq, otherwise irqs seem to get lost on at least the | |
927 | * blt/bsd rings on ivb. */ | 927 | * blt/bsd rings on ivb. */ | |
928 | gen6_gt_force_wake_get(dev_priv); | 928 | gen6_gt_force_wake_get(dev_priv); | |
929 | 929 | |||
930 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 930 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
931 | if (ring->irq_refcount++ == 0) { | 931 | if (ring->irq_refcount++ == 0) { | |
932 | if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) | 932 | if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) | |
933 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | | 933 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | | |
934 | GEN6_RENDER_L3_PARITY_ERROR)); | 934 | GEN6_RENDER_L3_PARITY_ERROR)); | |
935 | else | 935 | else | |
936 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); | 936 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); | |
937 | dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; | 937 | dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; | |
938 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 938 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
939 | POSTING_READ(GTIMR); | 939 | POSTING_READ(GTIMR); | |
940 | } | 940 | } | |
941 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 941 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
942 | 942 | |||
943 | return true; | 943 | return true; | |
944 | } | 944 | } | |
945 | 945 | |||
946 | static void | 946 | static void | |
947 | gen6_ring_put_irq(struct intel_ring_buffer *ring) | 947 | gen6_ring_put_irq(struct intel_ring_buffer *ring) | |
948 | { | 948 | { | |
949 | struct drm_device *dev = ring->dev; | 949 | struct drm_device *dev = ring->dev; | |
950 | drm_i915_private_t *dev_priv = dev->dev_private; | 950 | drm_i915_private_t *dev_priv = dev->dev_private; | |
951 | unsigned long flags; | 951 | unsigned long flags; | |
952 | 952 | |||
953 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | 953 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
954 | if (--ring->irq_refcount == 0) { | 954 | if (--ring->irq_refcount == 0) { | |
955 | if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) | 955 | if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) | |
956 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); | 956 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); | |
957 | else | 957 | else | |
958 | I915_WRITE_IMR(ring, ~0); | 958 | I915_WRITE_IMR(ring, ~0); | |
959 | dev_priv->gt_irq_mask |= ring->irq_enable_mask; | 959 | dev_priv->gt_irq_mask |= ring->irq_enable_mask; | |
960 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 960 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
961 | POSTING_READ(GTIMR); | 961 | POSTING_READ(GTIMR); | |
962 | } | 962 | } | |
963 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | 963 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
964 | 964 | |||
965 | gen6_gt_force_wake_put(dev_priv); | 965 | gen6_gt_force_wake_put(dev_priv); | |
966 | } | 966 | } | |
967 | 967 | |||
968 | static int | 968 | static int | |
969 | i965_dispatch_execbuffer(struct intel_ring_buffer *ring, | 969 | i965_dispatch_execbuffer(struct intel_ring_buffer *ring, | |
970 | u32 offset, u32 length, | 970 | u32 offset, u32 length, | |
971 | unsigned flags) | 971 | unsigned flags) | |
972 | { | 972 | { | |
973 | int ret; | 973 | int ret; | |
974 | 974 | |||
975 | ret = intel_ring_begin(ring, 2); | 975 | ret = intel_ring_begin(ring, 2); | |
976 | if (ret) | 976 | if (ret) | |
977 | return ret; | 977 | return ret; | |
978 | 978 | |||
979 | intel_ring_emit(ring, | 979 | intel_ring_emit(ring, | |
980 | MI_BATCH_BUFFER_START | | 980 | MI_BATCH_BUFFER_START | | |
981 | MI_BATCH_GTT | | 981 | MI_BATCH_GTT | | |
982 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); | 982 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); | |
983 | intel_ring_emit(ring, offset); | 983 | intel_ring_emit(ring, offset); | |
984 | intel_ring_advance(ring); | 984 | intel_ring_advance(ring); | |
985 | 985 | |||
986 | return 0; | 986 | return 0; | |
987 | } | 987 | } | |
988 | 988 | |||
989 | /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ | 989 | /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ | |
990 | #define I830_BATCH_LIMIT (256*1024) | 990 | #define I830_BATCH_LIMIT (256*1024) | |
991 | static int | 991 | static int | |
992 | i830_dispatch_execbuffer(struct intel_ring_buffer *ring, | 992 | i830_dispatch_execbuffer(struct intel_ring_buffer *ring, | |
993 | u32 offset, u32 len, | 993 | u32 offset, u32 len, | |
994 | unsigned flags) | 994 | unsigned flags) | |
995 | { | 995 | { | |
996 | int ret; | 996 | int ret; | |
997 | 997 | |||
998 | if (flags & I915_DISPATCH_PINNED) { | 998 | if (flags & I915_DISPATCH_PINNED) { | |
999 | ret = intel_ring_begin(ring, 4); | 999 | ret = intel_ring_begin(ring, 4); | |
1000 | if (ret) | 1000 | if (ret) | |
1001 | return ret; | 1001 | return ret; | |
1002 | 1002 | |||
1003 | intel_ring_emit(ring, MI_BATCH_BUFFER); | 1003 | intel_ring_emit(ring, MI_BATCH_BUFFER); | |
1004 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | 1004 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | |
1005 | intel_ring_emit(ring, offset + len - 8); | 1005 | intel_ring_emit(ring, offset + len - 8); | |
1006 | intel_ring_emit(ring, MI_NOOP); | 1006 | intel_ring_emit(ring, MI_NOOP); | |
1007 | intel_ring_advance(ring); | 1007 | intel_ring_advance(ring); | |
1008 | } else { | 1008 | } else { | |
1009 | struct drm_i915_gem_object *obj = ring->private; | 1009 | struct drm_i915_gem_object *obj = ring->private; | |
1010 | u32 cs_offset = obj->gtt_offset; | 1010 | u32 cs_offset = obj->gtt_offset; | |
1011 | 1011 | |||
1012 | if (len > I830_BATCH_LIMIT) | 1012 | if (len > I830_BATCH_LIMIT) | |
1013 | return -ENOSPC; | 1013 | return -ENOSPC; | |
1014 | 1014 | |||
1015 | ret = intel_ring_begin(ring, 9+3); | 1015 | ret = intel_ring_begin(ring, 9+3); | |
1016 | if (ret) | 1016 | if (ret) | |
1017 | return ret; | 1017 | return ret; | |
1018 | /* Blit the batch (which now has all relocs applied) to the stable batch | 1018 | /* Blit the batch (which now has all relocs applied) to the stable batch | |
1019 | * scratch bo area (so that the CS never stumbles over its tlb | 1019 | * scratch bo area (so that the CS never stumbles over its tlb | |
1020 | * invalidation bug) ... */ | 1020 | * invalidation bug) ... */ | |
1021 | intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | | 1021 | intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | | |
1022 | XY_SRC_COPY_BLT_WRITE_ALPHA | | 1022 | XY_SRC_COPY_BLT_WRITE_ALPHA | | |
1023 | XY_SRC_COPY_BLT_WRITE_RGB); | 1023 | XY_SRC_COPY_BLT_WRITE_RGB); | |
1024 | intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); | 1024 | intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); | |
1025 | intel_ring_emit(ring, 0); | 1025 | intel_ring_emit(ring, 0); | |
1026 | intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); | 1026 | intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); | |
1027 | intel_ring_emit(ring, cs_offset); | 1027 | intel_ring_emit(ring, cs_offset); | |
1028 | intel_ring_emit(ring, 0); | 1028 | intel_ring_emit(ring, 0); | |
1029 | intel_ring_emit(ring, 4096); | 1029 | intel_ring_emit(ring, 4096); | |
1030 | intel_ring_emit(ring, offset); | 1030 | intel_ring_emit(ring, offset); | |
1031 | intel_ring_emit(ring, MI_FLUSH); | 1031 | intel_ring_emit(ring, MI_FLUSH); | |
1032 | 1032 | |||
1033 | /* ... and execute it. */ | 1033 | /* ... and execute it. */ | |
1034 | intel_ring_emit(ring, MI_BATCH_BUFFER); | 1034 | intel_ring_emit(ring, MI_BATCH_BUFFER); | |
1035 | intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | 1035 | intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | |
1036 | intel_ring_emit(ring, cs_offset + len - 8); | 1036 | intel_ring_emit(ring, cs_offset + len - 8); | |
1037 | intel_ring_advance(ring); | 1037 | intel_ring_advance(ring); | |
1038 | } | 1038 | } | |
1039 | 1039 | |||
1040 | return 0; | 1040 | return 0; | |
1041 | } | 1041 | } | |
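
The blit geometry in the unpinned path above is terse but decodable: with
32-bit pixels, the 1024-pixel row width times 4 bytes matches the
4096-byte pitch, so each blit row moves exactly one page, and
(DIV_ROUND_UP(len, 4096) << 16) | 1024 copies just enough whole pages to
cover the batch.  A small worked example (reading the width field as
32bpp pixels is an inference from BLT_DEPTH_32):

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

        int main(void)
        {
                unsigned len = 100000;  /* hypothetical batch length */
                unsigned rows = DIV_ROUND_UP(len, 4096);

                /* 25 rows x 4096 bytes = 102400 bytes, covering len. */
                printf("rows=%u copied=%u\n", rows, rows * 4096);
                return 0;
        }
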
1042 | 1042 | |||
1043 | static int | 1043 | static int | |
1044 | i915_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1044 | i915_dispatch_execbuffer(struct intel_ring_buffer *ring, | |
1045 | u32 offset, u32 len, | 1045 | u32 offset, u32 len, | |
1046 | unsigned flags) | 1046 | unsigned flags) | |
1047 | { | 1047 | { | |
1048 | int ret; | 1048 | int ret; | |
1049 | 1049 | |||
1050 | ret = intel_ring_begin(ring, 2); | 1050 | ret = intel_ring_begin(ring, 2); | |
1051 | if (ret) | 1051 | if (ret) | |
1052 | return ret; | 1052 | return ret; | |
1053 | 1053 | |||
1054 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); | 1054 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); | |
1055 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | 1055 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | |
1056 | intel_ring_advance(ring); | 1056 | intel_ring_advance(ring); | |
1057 | 1057 | |||
1058 | return 0; | 1058 | return 0; | |
1059 | } | 1059 | } | |
1060 | 1060 | |||
1061 | static void cleanup_status_page(struct intel_ring_buffer *ring) | 1061 | static void cleanup_status_page(struct intel_ring_buffer *ring) | |
1062 | { | 1062 | { | |
1063 | struct drm_i915_gem_object *obj; | 1063 | struct drm_i915_gem_object *obj; | |
1064 | 1064 | |||
1065 | obj = ring->status_page.obj; | 1065 | obj = ring->status_page.obj; | |
1066 | if (obj == NULL) | 1066 | if (obj == NULL) | |
1067 | return; | 1067 | return; | |
1068 | 1068 | |||
1069 | kunmap(sg_page(obj->pages->sgl)); | 1069 | kunmap(sg_page(obj->pages->sgl)); | |
1070 | i915_gem_object_unpin(obj); | 1070 | i915_gem_object_unpin(obj); | |
1071 | drm_gem_object_unreference(&obj->base); | 1071 | drm_gem_object_unreference(&obj->base); | |
1072 | ring->status_page.obj = NULL; | 1072 | ring->status_page.obj = NULL; | |
1073 | } | 1073 | } | |
1074 | 1074 | |||
1075 | static int init_status_page(struct intel_ring_buffer *ring) | 1075 | static int init_status_page(struct intel_ring_buffer *ring) | |
1076 | { | 1076 | { | |
1077 | struct drm_device *dev = ring->dev; | 1077 | struct drm_device *dev = ring->dev; | |
1078 | struct drm_i915_gem_object *obj; | 1078 | struct drm_i915_gem_object *obj; | |
1079 | int ret; | 1079 | int ret; | |
1080 | 1080 | |||
1081 | obj = i915_gem_alloc_object(dev, 4096); | 1081 | obj = i915_gem_alloc_object(dev, 4096); | |
1082 | if (obj == NULL) { | 1082 | if (obj == NULL) { | |
1083 | DRM_ERROR("Failed to allocate status page\n"); | 1083 | DRM_ERROR("Failed to allocate status page\n"); | |
1084 | ret = -ENOMEM; | 1084 | ret = -ENOMEM; | |
1085 | goto err; | 1085 | goto err; | |
1086 | } | 1086 | } | |
1087 | 1087 | |||
1088 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | 1088 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); | |
1089 | 1089 | |||
1090 | ret = i915_gem_object_pin(obj, 4096, true, false); | 1090 | ret = i915_gem_object_pin(obj, 4096, true, false); | |
1091 | if (ret != 0) { | 1091 | if (ret != 0) { | |
1092 | goto err_unref; | 1092 | goto err_unref; | |
1093 | } | 1093 | } | |
1094 | 1094 | |||
1095 | ring->status_page.gfx_addr = obj->gtt_offset; | 1095 | ring->status_page.gfx_addr = obj->gtt_offset; | |
1096 | ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); | 1096 | ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); | |
1097 | if (ring->status_page.page_addr == NULL) { | 1097 | if (ring->status_page.page_addr == NULL) { | |
1098 | ret = -ENOMEM; | 1098 | ret = -ENOMEM; | |
1099 | goto err_unpin; | 1099 | goto err_unpin; | |
1100 | } | 1100 | } | |
1101 | ring->status_page.obj = obj; | 1101 | ring->status_page.obj = obj; | |
1102 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 1102 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | |
1103 | 1103 | |||
1104 | intel_ring_setup_status_page(ring); | 1104 | intel_ring_setup_status_page(ring); | |
1105 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", | 1105 | DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", | |
1106 | ring->name, ring->status_page.gfx_addr); | 1106 | ring->name, ring->status_page.gfx_addr); | |
1107 | 1107 | |||
1108 | return 0; | 1108 | return 0; | |
1109 | 1109 | |||
1110 | err_unpin: | 1110 | err_unpin: | |
1111 | i915_gem_object_unpin(obj); | 1111 | i915_gem_object_unpin(obj); | |
1112 | err_unref: | 1112 | err_unref: | |
1113 | drm_gem_object_unreference(&obj->base); | 1113 | drm_gem_object_unreference(&obj->base); | |
1114 | err: | 1114 | err: | |
1115 | return ret; | 1115 | return ret; | |
1116 | } | 1116 | } | |
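
The status page set up here is a single GPU-pinned, CPU-kmapped page that the hardware writes asynchronously (seqnos, scratch values); the rest of the driver consumes it with plain indexed loads from page_addr. A minimal sketch of the read side, assuming the customary intel_read_status_page() helper from intel_ringbuffer.h, which is not part of this diff:

	/* Sketch only: how the status page initialized above is read.
	 * Modeled on the usual intel_ringbuffer.h helper; reproduced
	 * here for illustration. */
	static inline u32
	intel_read_status_page(struct intel_ring_buffer *ring, int reg)
	{
		/* The GPU updates the page behind our back, so read it
		 * fresh each time rather than caching the value. */
		return ring->status_page.page_addr[reg];
	}

	/* e.g. the last completed seqno:
	 *	intel_read_status_page(ring, I915_GEM_HWS_INDEX) */
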
1117 | 1117 | |||
1118 | static int init_phys_hws_pga(struct intel_ring_buffer *ring) | 1118 | static int init_phys_hws_pga(struct intel_ring_buffer *ring) | |
1119 | { | 1119 | { | |
1120 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1120 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
1121 | u32 addr; | 1121 | u32 addr; | |
1122 | 1122 | |||
1123 | if (!dev_priv->status_page_dmah) { | 1123 | if (!dev_priv->status_page_dmah) { | |
1124 | dev_priv->status_page_dmah = | 1124 | dev_priv->status_page_dmah = | |
1125 | drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); | 1125 | drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); | |
1126 | if (!dev_priv->status_page_dmah) | 1126 | if (!dev_priv->status_page_dmah) | |
1127 | return -ENOMEM; | 1127 | return -ENOMEM; | |
1128 | } | 1128 | } | |
1129 | 1129 | |||
1130 | addr = dev_priv->status_page_dmah->busaddr; | 1130 | addr = dev_priv->status_page_dmah->busaddr; | |
1131 | if (INTEL_INFO(ring->dev)->gen >= 4) | 1131 | if (INTEL_INFO(ring->dev)->gen >= 4) | |
1132 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | 1132 | addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; | |
1133 | I915_WRITE(HWS_PGA, addr); | 1133 | I915_WRITE(HWS_PGA, addr); | |
1134 | 1134 | |||
1135 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | 1135 | ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; | |
1136 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | 1136 | memset(ring->status_page.page_addr, 0, PAGE_SIZE); | |
1137 | 1137 | |||
1138 | return 0; | 1138 | return 0; | |
1139 | } | 1139 | } | |
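
On pre-gen4 hardware HWS_PGA takes the page-aligned bus address directly; the gen4+ branch above additionally folds physical address bits 35:32 into register bits 7:4, which are otherwise unused since a page-aligned address has its low 12 bits clear. A standalone sketch of the packing implied by the code above, using a hypothetical 36-bit address:

	/* Sketch only: the HWS_PGA encoding performed by
	 * init_phys_hws_pga() on gen4+, with a made-up address. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t busaddr = 0x923456000ULL; /* page-aligned, 36-bit */
		uint32_t addr = (uint32_t)busaddr; /* keeps bits 31:12 */

		/* Physical address bits 35:32 land in register bits 7:4. */
		addr |= (uint32_t)((busaddr >> 28) & 0xf0);

		printf("HWS_PGA = 0x%08x\n", addr); /* prints 0x23456090 */
		return 0;
	}
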
1140 | 1140 | |||
1141 | static int intel_init_ring_buffer(struct drm_device *dev, | 1141 | static int intel_init_ring_buffer(struct drm_device *dev, | |
1142 | struct intel_ring_buffer *ring) | 1142 | struct intel_ring_buffer *ring) | |
1143 | { | 1143 | { | |
1144 | struct drm_i915_gem_object *obj; | 1144 | struct drm_i915_gem_object *obj; | |
1145 | struct drm_i915_private *dev_priv = dev->dev_private; | 1145 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1146 | int ret; | 1146 | int ret; | |
1147 | 1147 | |||
1148 | ring->dev = dev; | 1148 | ring->dev = dev; | |
1149 | INIT_LIST_HEAD(&ring->active_list); | 1149 | INIT_LIST_HEAD(&ring->active_list); | |
1150 | INIT_LIST_HEAD(&ring->request_list); | 1150 | INIT_LIST_HEAD(&ring->request_list); | |
1151 | ring->size = 32 * PAGE_SIZE; | 1151 | ring->size = 32 * PAGE_SIZE; | |
1152 | memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno)); | 1152 | memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno)); | |
1153 | 1153 | |||
1154 | #ifdef __NetBSD__ | |||
1155 | DRM_INIT_WAITQUEUE(&ring->irq_queue, "i915irq"); | |||
1156 | #else | |||
1154 | init_waitqueue_head(&ring->irq_queue); | 1157 | init_waitqueue_head(&ring->irq_queue); | |
1158 | #endif | |||
1155 | 1159 | |||
1156 | if (I915_NEED_GFX_HWS(dev)) { | 1160 | if (I915_NEED_GFX_HWS(dev)) { | |
1157 | ret = init_status_page(ring); | 1161 | ret = init_status_page(ring); | |
1158 | if (ret) | 1162 | if (ret) | |
1159 | return ret; | 1163 | return ret; | |
1160 | } else { | 1164 | } else { | |
1161 | BUG_ON(ring->id != RCS); | 1165 | BUG_ON(ring->id != RCS); | |
1162 | ret = init_phys_hws_pga(ring); | 1166 | ret = init_phys_hws_pga(ring); | |
1163 | if (ret) | 1167 | if (ret) | |
1164 | return ret; | 1168 | return ret; | |
1165 | } | 1169 | } | |
1166 | 1170 | |||
1167 | obj = i915_gem_alloc_object(dev, ring->size); | 1171 | obj = i915_gem_alloc_object(dev, ring->size); | |
1168 | if (obj == NULL) { | 1172 | if (obj == NULL) { | |
1169 | DRM_ERROR("Failed to allocate ringbuffer\n"); | 1173 | DRM_ERROR("Failed to allocate ringbuffer\n"); | |
1170 | ret = -ENOMEM; | 1174 | ret = -ENOMEM; | |
1171 | goto err_hws; | 1175 | goto err_hws; | |
1172 | } | 1176 | } | |
1173 | 1177 | |||
1174 | ring->obj = obj; | 1178 | ring->obj = obj; | |
1175 | 1179 | |||
1176 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); | 1180 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); | |
1177 | if (ret) | 1181 | if (ret) | |
1178 | goto err_unref; | 1182 | goto err_unref; | |
1179 | 1183 | |||
1180 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | 1184 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | |
1181 | if (ret) | 1185 | if (ret) | |
1182 | goto err_unpin; | 1186 | goto err_unpin; | |
1183 | 1187 | |||
1184 | #ifdef __NetBSD__ | 1188 | #ifdef __NetBSD__ | |
1185 | ring->virtual_start_map.offset = (dev_priv->mm.gtt->gma_bus_addr + | 1189 | ring->virtual_start_map.offset = (dev_priv->mm.gtt->gma_bus_addr + | |
1186 | obj->gtt_offset); | 1190 | obj->gtt_offset); | |
1187 | ring->virtual_start_map.size = ring->size; | 1191 | ring->virtual_start_map.size = ring->size; | |
1188 | ring->virtual_start_map.flags = 0; | 1192 | ring->virtual_start_map.flags = 0; | |
1189 | ring->virtual_start_map.flags |= _DRM_RESTRICTED; | 1193 | ring->virtual_start_map.flags |= _DRM_RESTRICTED; | |
1190 | ring->virtual_start_map.flags |= _DRM_KERNEL; | 1194 | ring->virtual_start_map.flags |= _DRM_KERNEL; | |
1191 | ring->virtual_start_map.flags |= _DRM_WRITE_COMBINING; | 1195 | ring->virtual_start_map.flags |= _DRM_WRITE_COMBINING; | |
1192 | ring->virtual_start_map.flags |= _DRM_DRIVER; | 1196 | ring->virtual_start_map.flags |= _DRM_DRIVER; | |
1193 | ret = drm_ioremap(dev, &ring->virtual_start_map); | 1197 | ret = drm_ioremap(dev, &ring->virtual_start_map); | |
1194 | if (ret) { | 1198 | if (ret) { | |
1195 | DRM_ERROR("failed to map ring buffer\n"); | 1199 | DRM_ERROR("failed to map ring buffer\n"); | |
1196 | goto err_unpin; | 1200 | goto err_unpin; | |
1197 | } | 1201 | } | |
1198 | ring->virtual_start_mapped = true; | 1202 | ring->virtual_start_mapped = true; | |
1199 | #else | 1203 | #else | |
1200 | ring->virtual_start = | 1204 | ring->virtual_start = | |
1201 | ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, | 1205 | ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, | |
1202 | ring->size); | 1206 | ring->size); | |
1203 | if (ring->virtual_start == NULL) { | 1207 | if (ring->virtual_start == NULL) { | |
1204 | DRM_ERROR("Failed to map ringbuffer.\n"); | 1208 | DRM_ERROR("Failed to map ringbuffer.\n"); | |
1205 | ret = -EINVAL; | 1209 | ret = -EINVAL; | |
1206 | goto err_unpin; | 1210 | goto err_unpin; | |
1207 | } | 1211 | } | |
1208 | #endif | 1212 | #endif | |
1209 | 1213 | |||
1210 | ret = ring->init(ring); | 1214 | ret = ring->init(ring); | |
1211 | if (ret) | 1215 | if (ret) | |
1212 | goto err_unmap; | 1216 | goto err_unmap; | |
1213 | 1217 | |||
1214 | /* Workaround an erratum on the i830 which causes a hang if | 1218 | /* Workaround an erratum on the i830 which causes a hang if | |
1215 | * the TAIL pointer points to within the last 2 cachelines | 1219 | * the TAIL pointer points to within the last 2 cachelines | |
1216 | * of the buffer. | 1220 | * of the buffer. | |
1217 | */ | 1221 | */ | |
1218 | ring->effective_size = ring->size; | 1222 | ring->effective_size = ring->size; | |
1219 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) | 1223 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) | |
1220 | ring->effective_size -= 128; | 1224 | ring->effective_size -= 128; | |
1221 | 1225 | |||
1222 | return 0; | 1226 | return 0; | |
1223 | 1227 | |||
1224 | err_unmap: | 1228 | err_unmap: | |
1225 | #ifdef __NetBSD__ | 1229 | #ifdef __NetBSD__ | |
1226 | drm_iounmap(dev, &ring->virtual_start_map); | 1230 | drm_iounmap(dev, &ring->virtual_start_map); | |
1227 | ring->virtual_start_mapped = false; | 1231 | ring->virtual_start_mapped = false; | |
1228 | #else | 1232 | #else | |
1229 | iounmap(ring->virtual_start); | 1233 | iounmap(ring->virtual_start); | |
1230 | #endif | 1234 | #endif | |
1231 | err_unpin: | 1235 | err_unpin: | |
1232 | i915_gem_object_unpin(obj); | 1236 | i915_gem_object_unpin(obj); | |
1233 | err_unref: | 1237 | err_unref: | |
1234 | drm_gem_object_unreference(&obj->base); | 1238 | drm_gem_object_unreference(&obj->base); | |
1235 | ring->obj = NULL; | 1239 | ring->obj = NULL; | |
1236 | err_hws: | 1240 | err_hws: | |
1237 | cleanup_status_page(ring); | 1241 | cleanup_status_page(ring); | |
1238 | return ret; | 1242 | return ret; | |
1239 | } | 1243 | } | |
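
The irq_queue hunk above is the heart of this commit: on NetBSD the ring's wait channel becomes a DRM waitqueue (DRM_INIT_WAITQUEUE) instead of a Linux wait_queue_head_t, so waiters and the IRQ handler coordinate through dev_priv->irq_lock as the condvar interlock. A sketch of the two wait/wake idioms; apart from DRM_INIT_WAITQUEUE, the NetBSD macro names below are assumptions modeled on the drm_wait_netbsd.h shims and do not appear in this diff:

	/* Sketch only: the wait/wake idiom behind ring->irq_queue.
	 * DRM_SPIN_WAKEUP_ALL and DRM_SPIN_TIMED_WAIT_UNTIL are assumed
	 * names; only DRM_INIT_WAITQUEUE is confirmed by this change.
	 * The real IRQ paths use the irqsave locking variants. */
	#ifdef __NetBSD__
		/* IRQ handler: wake all waiters under the interlock. */
		spin_lock(&dev_priv->irq_lock);
		DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
		spin_unlock(&dev_priv->irq_lock);

		/* Waiter: sleep until the seqno passes or we time out. */
		spin_lock(&dev_priv->irq_lock);
		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &ring->irq_queue,
		    &dev_priv->irq_lock, timeout,
		    i915_seqno_passed(ring->get_seqno(ring, false), seqno));
		spin_unlock(&dev_priv->irq_lock);
	#else
		/* Linux: classic waitqueue wake/wait. */
		wake_up_all(&ring->irq_queue);
		ret = wait_event_interruptible_timeout(ring->irq_queue,
		    i915_seqno_passed(ring->get_seqno(ring, false), seqno),
		    timeout);
	#endif
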
1240 | 1244 | |||
1241 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | 1245 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | |
1242 | { | 1246 | { | |
1243 | struct drm_i915_private *dev_priv; | 1247 | struct drm_i915_private *dev_priv; | |
1244 | int ret; | 1248 | int ret; | |
1245 | 1249 | |||
1246 | if (ring->obj == NULL) | 1250 | if (ring->obj == NULL) | |
1247 | return; | 1251 | return; | |
1248 | 1252 | |||
1249 | /* Disable the ring buffer. The ring must be idle at this point */ | 1253 | /* Disable the ring buffer. The ring must be idle at this point */ | |
1250 | dev_priv = ring->dev->dev_private; | 1254 | dev_priv = ring->dev->dev_private; | |
1251 | ret = intel_ring_idle(ring); | 1255 | ret = intel_ring_idle(ring); | |
1252 | if (ret) | 1256 | if (ret) | |
1253 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | 1257 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | |
1254 | ring->name, ret); | 1258 | ring->name, ret); | |
1255 | 1259 | |||
1256 | I915_WRITE_CTL(ring, 0); | 1260 | I915_WRITE_CTL(ring, 0); | |
1257 | 1261 | |||
1258 | #ifdef __NetBSD__ | 1262 | #ifdef __NetBSD__ | |
1259 | drm_iounmap(ring->dev, &ring->virtual_start_map); | 1263 | drm_iounmap(ring->dev, &ring->virtual_start_map); | |
1260 | ring->virtual_start_mapped = false; | 1264 | ring->virtual_start_mapped = false; | |
1261 | #else | 1265 | #else | |
1262 | iounmap(ring->virtual_start); | 1266 | iounmap(ring->virtual_start); | |
1263 | #endif | 1267 | #endif | |
1264 | 1268 | |||
1265 | i915_gem_object_unpin(ring->obj); | 1269 | i915_gem_object_unpin(ring->obj); | |
1266 | drm_gem_object_unreference(&ring->obj->base); | 1270 | drm_gem_object_unreference(&ring->obj->base); | |
1267 | ring->obj = NULL; | 1271 | ring->obj = NULL; | |
1268 | 1272 | |||
1269 | if (ring->cleanup) | 1273 | if (ring->cleanup) | |
1270 | ring->cleanup(ring); | 1274 | ring->cleanup(ring); | |
1271 | 1275 | |||
1272 | cleanup_status_page(ring); | 1276 | cleanup_status_page(ring); | |
1273 | } | 1277 | } | |
1274 | 1278 | |||
1275 | static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) | 1279 | static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) | |
1276 | { | 1280 | { | |
1277 | int ret; | 1281 | int ret; | |
1278 | 1282 | |||
1279 | ret = i915_wait_seqno(ring, seqno); | 1283 | ret = i915_wait_seqno(ring, seqno); | |
1280 | if (!ret) | 1284 | if (!ret) | |
1281 | i915_gem_retire_requests_ring(ring); | 1285 | i915_gem_retire_requests_ring(ring); | |
1282 | 1286 | |||
1283 | return ret; | 1287 | return ret; | |
1284 | } | 1288 | } | |
1285 | 1289 | |||
1286 | static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) | 1290 | static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) | |
1287 | { | 1291 | { | |
1288 | struct drm_i915_gem_request *request; | 1292 | struct drm_i915_gem_request *request; | |
1289 | u32 seqno = 0; | 1293 | u32 seqno = 0; | |
1290 | int ret; | 1294 | int ret; | |
1291 | 1295 | |||
1292 | i915_gem_retire_requests_ring(ring); | 1296 | i915_gem_retire_requests_ring(ring); | |
1293 | 1297 | |||
1294 | if (ring->last_retired_head != -1) { | 1298 | if (ring->last_retired_head != -1) { | |
1295 | ring->head = ring->last_retired_head; | 1299 | ring->head = ring->last_retired_head; | |
1296 | ring->last_retired_head = -1; | 1300 | ring->last_retired_head = -1; | |
1297 | ring->space = ring_space(ring); | 1301 | ring->space = ring_space(ring); | |
1298 | if (ring->space >= n) | 1302 | if (ring->space >= n) | |
1299 | return 0; | 1303 | return 0; | |
1300 | } | 1304 | } | |
1301 | 1305 | |||
1302 | list_for_each_entry(request, &ring->request_list, list) { | 1306 | list_for_each_entry(request, &ring->request_list, list) { | |
1303 | int space; | 1307 | int space; | |
1304 | 1308 | |||
1305 | if (request->tail == -1) | 1309 | if (request->tail == -1) | |
1306 | continue; | 1310 | continue; | |
1307 | 1311 | |||
1308 | space = request->tail - (ring->tail + I915_RING_FREE_SPACE); | 1312 | space = request->tail - (ring->tail + I915_RING_FREE_SPACE); | |
1309 | if (space < 0) | 1313 | if (space < 0) | |
1310 | space += ring->size; | 1314 | space += ring->size; | |
1311 | if (space >= n) { | 1315 | if (space >= n) { | |
1312 | seqno = request->seqno; | 1316 | seqno = request->seqno; | |
1313 | break; | 1317 | break; | |
1314 | } | 1318 | } | |
1315 | 1319 | |||
1316 | /* Consume this request in case we need more space than | 1320 | /* Consume this request in case we need more space than | |
1317 | * is available and so need to prevent a race between | 1321 | * is available and so need to prevent a race between | |
1318 | * updating last_retired_head and direct reads of | 1322 | * updating last_retired_head and direct reads of | |
1319 | * I915_RING_HEAD. It also provides a nice sanity check. | 1323 | * I915_RING_HEAD. It also provides a nice sanity check. | |
1320 | */ | 1324 | */ | |
1321 | request->tail = -1; | 1325 | request->tail = -1; | |
1322 | } | 1326 | } | |
1323 | 1327 | |||
1324 | if (seqno == 0) | 1328 | if (seqno == 0) | |
1325 | return -ENOSPC; | 1329 | return -ENOSPC; | |
1326 | 1330 | |||
1327 | ret = intel_ring_wait_seqno(ring, seqno); | 1331 | ret = intel_ring_wait_seqno(ring, seqno); | |
1328 | if (ret) | 1332 | if (ret) | |
1329 | return ret; | 1333 | return ret; | |
1330 | 1334 | |||
1331 | if (WARN_ON(ring->last_retired_head == -1)) | 1335 | if (WARN_ON(ring->last_retired_head == -1)) | |
1332 | return -ENOSPC; | 1336 | return -ENOSPC; | |
1333 | 1337 | |||
1334 | ring->head = ring->last_retired_head; | 1338 | ring->head = ring->last_retired_head; | |
1335 | ring->last_retired_head = -1; | 1339 | ring->last_retired_head = -1; | |
1336 | ring->space = ring_space(ring); | 1340 | ring->space = ring_space(ring); | |
1337 | if (WARN_ON(ring->space < n)) | 1341 | if (WARN_ON(ring->space < n)) | |
1338 | return -ENOSPC; | 1342 | return -ENOSPC; | |
1339 | 1343 | |||
1340 | return 0; | 1344 | return 0; | |
1341 | } | 1345 | } | |
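
intel_ring_wait_request() scans the request list for the oldest request whose retirement frees at least n bytes; the computation is circular-buffer arithmetic with I915_RING_FREE_SPACE as the guard band that keeps HEAD and TAIL off the same cacheline (see the BSpec note in intel_ringbuffer.h below). A runnable worked example with hypothetical tail values:

	/* Sketch only: the circular free-space computation from
	 * intel_ring_wait_request(), with made-up numbers. */
	#include <stdio.h>

	#define I915_RING_FREE_SPACE 64

	int main(void)
	{
		int size = 32 * 4096;		/* ring->size: 128 KiB */
		int ring_tail = 0x1f000;	/* where we write next */
		int req_tail = 0x00800;		/* tail saved in a request */

		/* Bytes freed once the request retires and HEAD advances
		 * to req_tail, minus the guard band. */
		int space = req_tail - (ring_tail + I915_RING_FREE_SPACE);
		if (space < 0)
			space += size;		/* wrap around the ring */

		printf("space = %d bytes\n", space);	/* prints 6080 */
		return 0;
	}
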
1342 | 1346 | |||
1343 | static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) | 1347 | static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) | |
1344 | { | 1348 | { | |
1345 | struct drm_device *dev = ring->dev; | 1349 | struct drm_device *dev = ring->dev; | |
1346 | struct drm_i915_private *dev_priv = dev->dev_private; | 1350 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1347 | unsigned long end; | 1351 | unsigned long end; | |
1348 | int ret; | 1352 | int ret; | |
1349 | 1353 | |||
1350 | ret = intel_ring_wait_request(ring, n); | 1354 | ret = intel_ring_wait_request(ring, n); | |
1351 | if (ret != -ENOSPC) | 1355 | if (ret != -ENOSPC) | |
1352 | return ret; | 1356 | return ret; | |
1353 | 1357 | |||
1354 | trace_i915_ring_wait_begin(ring); | 1358 | trace_i915_ring_wait_begin(ring); | |
1355 | /* With GEM the hangcheck timer should kick us out of the loop; | 1359 | /* With GEM the hangcheck timer should kick us out of the loop; | |
1356 | * leaving it early runs the risk of corrupting GEM state (due | 1360 | * leaving it early runs the risk of corrupting GEM state (due | |
1357 | * to running on almost untested codepaths). But on resume | 1361 | * to running on almost untested codepaths). But on resume | |
1358 | * timers don't work yet, so prevent a complete hang in that | 1362 | * timers don't work yet, so prevent a complete hang in that | |
1359 | * case by choosing an insanely large timeout. */ | 1363 | * case by choosing an insanely large timeout. */ | |
1360 | end = jiffies + 60 * HZ; | 1364 | end = jiffies + 60 * HZ; | |
1361 | 1365 | |||
1362 | do { | 1366 | do { | |
1363 | ring->head = I915_READ_HEAD(ring); | 1367 | ring->head = I915_READ_HEAD(ring); | |
1364 | ring->space = ring_space(ring); | 1368 | ring->space = ring_space(ring); | |
1365 | if (ring->space >= n) { | 1369 | if (ring->space >= n) { | |
1366 | trace_i915_ring_wait_end(ring); | 1370 | trace_i915_ring_wait_end(ring); | |
1367 | return 0; | 1371 | return 0; | |
1368 | } | 1372 | } | |
1369 | 1373 | |||
1370 | if (dev->primary->master) { | 1374 | if (dev->primary->master) { | |
1371 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | 1375 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
1372 | if (master_priv->sarea_priv) | 1376 | if (master_priv->sarea_priv) | |
1373 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 1377 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | |
1374 | } | 1378 | } | |
1375 | 1379 | |||
1376 | msleep(1); | 1380 | msleep(1); | |
1377 | 1381 | |||
1378 | ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | 1382 | ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | |
1379 | if (ret) | 1383 | if (ret) | |
1380 | return ret; | 1384 | return ret; | |
1381 | } while (!time_after(jiffies, end)); | 1385 | } while (!time_after(jiffies, end)); | |
1382 | trace_i915_ring_wait_end(ring); | 1386 | trace_i915_ring_wait_end(ring); | |
1383 | return -EBUSY; | 1387 | return -EBUSY; | |
1384 | } | 1388 | } | |
1385 | 1389 | |||
1386 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) | 1390 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) | |
1387 | { | 1391 | { | |
1388 | uint32_t __iomem *virt; | 1392 | uint32_t __iomem *virt; | |
1389 | int rem = ring->size - ring->tail; | 1393 | int rem = ring->size - ring->tail; | |
1390 | 1394 | |||
1391 | if (ring->space < rem) { | 1395 | if (ring->space < rem) { | |
1392 | int ret = ring_wait_for_space(ring, rem); | 1396 | int ret = ring_wait_for_space(ring, rem); | |
1393 | if (ret) | 1397 | if (ret) | |
1394 | return ret; | 1398 | return ret; | |
1395 | } | 1399 | } | |
1396 | 1400 | |||
1397 | virt = ring->virtual_start + ring->tail; | 1401 | virt = ring->virtual_start + ring->tail; | |
1398 | rem /= 4; | 1402 | rem /= 4; | |
1399 | while (rem--) | 1403 | while (rem--) | |
1400 | iowrite32(MI_NOOP, virt++); | 1404 | iowrite32(MI_NOOP, virt++); | |
1401 | 1405 | |||
1402 | ring->tail = 0; | 1406 | ring->tail = 0; | |
1403 | ring->space = ring_space(ring); | 1407 | ring->space = ring_space(ring); | |
1404 | 1408 | |||
1405 | return 0; | 1409 | return 0; | |
1406 | } | 1410 | } | |
1407 | 1411 | |||
1408 | int intel_ring_idle(struct intel_ring_buffer *ring) | 1412 | int intel_ring_idle(struct intel_ring_buffer *ring) | |
1409 | { | 1413 | { | |
1410 | u32 seqno; | 1414 | u32 seqno; | |
1411 | int ret; | 1415 | int ret; | |
1412 | 1416 | |||
1413 | /* We need to add any requests required to flush the objects and ring */ | 1417 | /* We need to add any requests required to flush the objects and ring */ | |
1414 | if (ring->outstanding_lazy_request) { | 1418 | if (ring->outstanding_lazy_request) { | |
1415 | ret = i915_add_request(ring, NULL, NULL); | 1419 | ret = i915_add_request(ring, NULL, NULL); | |
1416 | if (ret) | 1420 | if (ret) | |
1417 | return ret; | 1421 | return ret; | |
1418 | } | 1422 | } | |
1419 | 1423 | |||
1420 | /* Wait upon the last request to be completed */ | 1424 | /* Wait upon the last request to be completed */ | |
1421 | if (list_empty(&ring->request_list)) | 1425 | if (list_empty(&ring->request_list)) | |
1422 | return 0; | 1426 | return 0; | |
1423 | 1427 | |||
1424 | seqno = list_entry(ring->request_list.prev, | 1428 | seqno = list_entry(ring->request_list.prev, | |
1425 | struct drm_i915_gem_request, | 1429 | struct drm_i915_gem_request, | |
1426 | list)->seqno; | 1430 | list)->seqno; | |
1427 | 1431 | |||
1428 | return i915_wait_seqno(ring, seqno); | 1432 | return i915_wait_seqno(ring, seqno); | |
1429 | } | 1433 | } | |
1430 | 1434 | |||
1431 | static int | 1435 | static int | |
1432 | intel_ring_alloc_seqno(struct intel_ring_buffer *ring) | 1436 | intel_ring_alloc_seqno(struct intel_ring_buffer *ring) | |
1433 | { | 1437 | { | |
1434 | if (ring->outstanding_lazy_request) | 1438 | if (ring->outstanding_lazy_request) | |
1435 | return 0; | 1439 | return 0; | |
1436 | 1440 | |||
1437 | return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); | 1441 | return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); | |
1438 | } | 1442 | } | |
1439 | 1443 | |||
1440 | int intel_ring_begin(struct intel_ring_buffer *ring, | 1444 | int intel_ring_begin(struct intel_ring_buffer *ring, | |
1441 | int num_dwords) | 1445 | int num_dwords) | |
1442 | { | 1446 | { | |
1443 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 1447 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | |
1444 | int n = 4*num_dwords; | 1448 | int n = 4*num_dwords; | |
1445 | int ret; | 1449 | int ret; | |
1446 | 1450 | |||
1447 | ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | 1451 | ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); | |
1448 | if (ret) | 1452 | if (ret) | |
1449 | return ret; | 1453 | return ret; | |
1450 | 1454 | |||
1451 | /* Preallocate the olr before touching the ring */ | 1455 | /* Preallocate the olr before touching the ring */ | |
1452 | ret = intel_ring_alloc_seqno(ring); | 1456 | ret = intel_ring_alloc_seqno(ring); | |
1453 | if (ret) | 1457 | if (ret) | |
1454 | return ret; | 1458 | return ret; | |
1455 | 1459 | |||
1456 | if (unlikely(ring->tail + n > ring->effective_size)) { | 1460 | if (unlikely(ring->tail + n > ring->effective_size)) { | |
1457 | ret = intel_wrap_ring_buffer(ring); | 1461 | ret = intel_wrap_ring_buffer(ring); | |
1458 | if (unlikely(ret)) | 1462 | if (unlikely(ret)) | |
1459 | return ret; | 1463 | return ret; | |
1460 | } | 1464 | } | |
1461 | 1465 | |||
1462 | if (unlikely(ring->space < n)) { | 1466 | if (unlikely(ring->space < n)) { | |
1463 | ret = ring_wait_for_space(ring, n); | 1467 | ret = ring_wait_for_space(ring, n); | |
1464 | if (unlikely(ret)) | 1468 | if (unlikely(ret)) | |
1465 | return ret; | 1469 | return ret; | |
1466 | } | 1470 | } | |
1467 | 1471 | |||
1468 | ring->space -= n; | 1472 | ring->space -= n; | |
1469 | return 0; | 1473 | return 0; | |
1470 | } | 1474 | } | |
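
intel_ring_begin() is always the first step of an emit sequence: it checks for a wedged GPU, preallocates the lazy request seqno, wraps the buffer if the dwords would run past effective_size, and waits for space. Every emitter in this file then follows the same three-step shape; a minimal sketch modeled on the dispatch functions below:

	/* Sketch only: the canonical reserve/emit/advance pattern used
	 * by every command emitter in this file. */
	static int emit_two_noops(struct intel_ring_buffer *ring)
	{
		int ret;

		ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
		if (ret)
			return ret;	/* wedged, or no space freed in time */

		intel_ring_emit(ring, MI_NOOP);	/* write at ring->tail */
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);	/* publish new TAIL to the hw */

		return 0;
	}
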
1471 | 1475 | |||
1472 | void intel_ring_advance(struct intel_ring_buffer *ring) | 1476 | void intel_ring_advance(struct intel_ring_buffer *ring) | |
1473 | { | 1477 | { | |
1474 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 1478 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
1475 | 1479 | |||
1476 | ring->tail &= ring->size - 1; | 1480 | ring->tail &= ring->size - 1; | |
1477 | if (dev_priv->stop_rings & intel_ring_flag(ring)) | 1481 | if (dev_priv->stop_rings & intel_ring_flag(ring)) | |
1478 | return; | 1482 | return; | |
1479 | ring->write_tail(ring, ring->tail); | 1483 | ring->write_tail(ring, ring->tail); | |
1480 | } | 1484 | } | |
1481 | 1485 | |||
1482 | 1486 | |||
1483 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, | 1487 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, | |
1484 | u32 value) | 1488 | u32 value) | |
1485 | { | 1489 | { | |
1486 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | 1490 | drm_i915_private_t *dev_priv = ring->dev->dev_private; | |
1487 | 1491 | |||
1488 | /* Every tail move must follow the sequence below */ | 1492 | /* Every tail move must follow the sequence below */ | |
1489 | 1493 | |||
1490 | /* Disable notification that the ring is IDLE. The GT | 1494 | /* Disable notification that the ring is IDLE. The GT | |
1491 | * will then assume that it is busy and bring it out of rc6. | 1495 | * will then assume that it is busy and bring it out of rc6. | |
1492 | */ | 1496 | */ | |
1493 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | 1497 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | |
1494 | _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); | 1498 | _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); | |
1495 | 1499 | |||
1496 | /* Clear the context id. Here be magic! */ | 1500 | /* Clear the context id. Here be magic! */ | |
1497 | I915_WRITE64(GEN6_BSD_RNCID, 0x0); | 1501 | I915_WRITE64(GEN6_BSD_RNCID, 0x0); | |
1498 | 1502 | |||
1499 | /* Wait for the ring not to be idle, i.e. for it to wake up. */ | 1503 | /* Wait for the ring not to be idle, i.e. for it to wake up. */ | |
1500 | if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & | 1504 | if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & | |
1501 | GEN6_BSD_SLEEP_INDICATOR) == 0, | 1505 | GEN6_BSD_SLEEP_INDICATOR) == 0, | |
1502 | 50)) | 1506 | 50)) | |
1503 | DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); | 1507 | DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); | |
1504 | 1508 | |||
1505 | /* Now that the ring is fully powered up, update the tail */ | 1509 | /* Now that the ring is fully powered up, update the tail */ | |
1506 | I915_WRITE_TAIL(ring, value); | 1510 | I915_WRITE_TAIL(ring, value); | |
1507 | POSTING_READ(RING_TAIL(ring->mmio_base)); | 1511 | POSTING_READ(RING_TAIL(ring->mmio_base)); | |
1508 | 1512 | |||
1509 | /* Let the ring send IDLE messages to the GT again, | 1513 | /* Let the ring send IDLE messages to the GT again, | |
1510 | * and so let it sleep to conserve power when idle. | 1514 | * and so let it sleep to conserve power when idle. | |
1511 | */ | 1515 | */ | |
1512 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | 1516 | I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, | |
1513 | _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); | 1517 | _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); | |
1514 | } | 1518 | } | |
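
GEN6_BSD_SLEEP_PSMI_CONTROL is a masked register: the high 16 bits of a write select which of the low 16 bits take effect, so individual bits can be set or cleared without a read-modify-write cycle. The _MASKED_BIT_* macros encode that convention; their usual i915_reg.h expansions, reproduced here for illustration only:

	/* Sketch only: the masked-write idiom behind the PSMI writes
	 * above; these expansions match the customary i915_reg.h
	 * definitions but are not part of this diff. */
	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) */

	/* Writing _MASKED_BIT_ENABLE(0x1) == 0x00010001 sets bit 0 and
	 * leaves every other bit alone, so no lock or register read-back
	 * is needed around the update. */
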
1515 | 1519 | |||
1516 | static int gen6_ring_flush(struct intel_ring_buffer *ring, | 1520 | static int gen6_ring_flush(struct intel_ring_buffer *ring, | |
1517 | u32 invalidate, u32 flush) | 1521 | u32 invalidate, u32 flush) | |
1518 | { | 1522 | { | |
1519 | uint32_t cmd; | 1523 | uint32_t cmd; | |
1520 | int ret; | 1524 | int ret; | |
1521 | 1525 | |||
1522 | ret = intel_ring_begin(ring, 4); | 1526 | ret = intel_ring_begin(ring, 4); | |
1523 | if (ret) | 1527 | if (ret) | |
1524 | return ret; | 1528 | return ret; | |
1525 | 1529 | |||
1526 | cmd = MI_FLUSH_DW; | 1530 | cmd = MI_FLUSH_DW; | |
1527 | /* | 1531 | /* | |
1528 | * Bspec vol 1c.5 - video engine command streamer: | 1532 | * Bspec vol 1c.5 - video engine command streamer: | |
1529 | * "If ENABLED, all TLBs will be invalidated once the flush | 1533 | * "If ENABLED, all TLBs will be invalidated once the flush | |
1530 | * operation is complete. This bit is only valid when the | 1534 | * operation is complete. This bit is only valid when the | |
1531 | * Post-Sync Operation field is a value of 1h or 3h." | 1535 | * Post-Sync Operation field is a value of 1h or 3h." | |
1532 | */ | 1536 | */ | |
1533 | if (invalidate & I915_GEM_GPU_DOMAINS) | 1537 | if (invalidate & I915_GEM_GPU_DOMAINS) | |
1534 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | | 1538 | cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | | |
1535 | MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; | 1539 | MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; | |
1536 | intel_ring_emit(ring, cmd); | 1540 | intel_ring_emit(ring, cmd); | |
1537 | intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); | 1541 | intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); | |
1538 | intel_ring_emit(ring, 0); | 1542 | intel_ring_emit(ring, 0); | |
1539 | intel_ring_emit(ring, MI_NOOP); | 1543 | intel_ring_emit(ring, MI_NOOP); | |
1540 | intel_ring_advance(ring); | 1544 | intel_ring_advance(ring); | |
1541 | return 0; | 1545 | return 0; | |
1542 | } | 1546 | } | |
1543 | 1547 | |||
1544 | static int | 1548 | static int | |
1545 | hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1549 | hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |
1546 | u32 offset, u32 len, | 1550 | u32 offset, u32 len, | |
1547 | unsigned flags) | 1551 | unsigned flags) | |
1548 | { | 1552 | { | |
1549 | int ret; | 1553 | int ret; | |
1550 | 1554 | |||
1551 | ret = intel_ring_begin(ring, 2); | 1555 | ret = intel_ring_begin(ring, 2); | |
1552 | if (ret) | 1556 | if (ret) | |
1553 | return ret; | 1557 | return ret; | |
1554 | 1558 | |||
1555 | intel_ring_emit(ring, | 1559 | intel_ring_emit(ring, | |
1556 | MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | | 1560 | MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | | |
1557 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); | 1561 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); | |
1558 | /* bit0-7 is the length on GEN6+ */ | 1562 | /* bit0-7 is the length on GEN6+ */ | |
1559 | intel_ring_emit(ring, offset); | 1563 | intel_ring_emit(ring, offset); | |
1560 | intel_ring_advance(ring); | 1564 | intel_ring_advance(ring); | |
1561 | 1565 | |||
1562 | return 0; | 1566 | return 0; | |
1563 | } | 1567 | } | |
1564 | 1568 | |||
1565 | static int | 1569 | static int | |
1566 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | 1570 | gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |
1567 | u32 offset, u32 len, | 1571 | u32 offset, u32 len, | |
1568 | unsigned flags) | 1572 | unsigned flags) | |
1569 | { | 1573 | { | |
1570 | int ret; | 1574 | int ret; | |
1571 | 1575 | |||
1572 | ret = intel_ring_begin(ring, 2); | 1576 | ret = intel_ring_begin(ring, 2); | |
1573 | if (ret) | 1577 | if (ret) | |
1574 | return ret; | 1578 | return ret; | |
1575 | 1579 | |||
1576 | intel_ring_emit(ring, | 1580 | intel_ring_emit(ring, | |
1577 | MI_BATCH_BUFFER_START | | 1581 | MI_BATCH_BUFFER_START | | |
1578 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); | 1582 | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); | |
1579 | /* bit0-7 is the length on GEN6+ */ | 1583 | /* bit0-7 is the length on GEN6+ */ | |
1580 | intel_ring_emit(ring, offset); | 1584 | intel_ring_emit(ring, offset); | |
1581 | intel_ring_advance(ring); | 1585 | intel_ring_advance(ring); | |
1582 | 1586 | |||
1583 | return 0; | 1587 | return 0; | |
1584 | } | 1588 | } | |
1585 | 1589 | |||
1586 | /* Blitter support (SandyBridge+) */ | 1590 | /* Blitter support (SandyBridge+) */ | |
1587 | 1591 | |||
1588 | static int blt_ring_flush(struct intel_ring_buffer *ring, | 1592 | static int blt_ring_flush(struct intel_ring_buffer *ring, | |
1589 | u32 invalidate, u32 flush) | 1593 | u32 invalidate, u32 flush) | |
1590 | { | 1594 | { | |
1591 | uint32_t cmd; | 1595 | uint32_t cmd; | |
1592 | int ret; | 1596 | int ret; | |
1593 | 1597 | |||
1594 | ret = intel_ring_begin(ring, 4); | 1598 | ret = intel_ring_begin(ring, 4); | |
1595 | if (ret) | 1599 | if (ret) | |
1596 | return ret; | 1600 | return ret; | |
1597 | 1601 | |||
1598 | cmd = MI_FLUSH_DW; | 1602 | cmd = MI_FLUSH_DW; | |
1599 | /* | 1603 | /* | |
1600 | * Bspec vol 1c.3 - blitter engine command streamer: | 1604 | * Bspec vol 1c.3 - blitter engine command streamer: | |
1601 | * "If ENABLED, all TLBs will be invalidated once the flush | 1605 | * "If ENABLED, all TLBs will be invalidated once the flush | |
1602 | * operation is complete. This bit is only valid when the | 1606 | * operation is complete. This bit is only valid when the | |
1603 | * Post-Sync Operation field is a value of 1h or 3h." | 1607 | * Post-Sync Operation field is a value of 1h or 3h." | |
1604 | */ | 1608 | */ | |
1605 | if (invalidate & I915_GEM_DOMAIN_RENDER) | 1609 | if (invalidate & I915_GEM_DOMAIN_RENDER) | |
1606 | cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | | 1610 | cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | | |
1607 | MI_FLUSH_DW_OP_STOREDW; | 1611 | MI_FLUSH_DW_OP_STOREDW; | |
1608 | intel_ring_emit(ring, cmd); | 1612 | intel_ring_emit(ring, cmd); | |
1609 | intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); | 1613 | intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); | |
1610 | intel_ring_emit(ring, 0); | 1614 | intel_ring_emit(ring, 0); | |
1611 | intel_ring_emit(ring, MI_NOOP); | 1615 | intel_ring_emit(ring, MI_NOOP); | |
1612 | intel_ring_advance(ring); | 1616 | intel_ring_advance(ring); | |
1613 | return 0; | 1617 | return 0; | |
1614 | } | 1618 | } | |
1615 | 1619 | |||
1616 | int intel_init_render_ring_buffer(struct drm_device *dev) | 1620 | int intel_init_render_ring_buffer(struct drm_device *dev) | |
1617 | { | 1621 | { | |
1618 | drm_i915_private_t *dev_priv = dev->dev_private; | 1622 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1619 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | 1623 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | |
1620 | 1624 | |||
1621 | ring->name = "render ring"; | 1625 | ring->name = "render ring"; | |
1622 | ring->id = RCS; | 1626 | ring->id = RCS; | |
1623 | ring->mmio_base = RENDER_RING_BASE; | 1627 | ring->mmio_base = RENDER_RING_BASE; | |
1624 | 1628 | |||
1625 | if (INTEL_INFO(dev)->gen >= 6) { | 1629 | if (INTEL_INFO(dev)->gen >= 6) { | |
1626 | ring->add_request = gen6_add_request; | 1630 | ring->add_request = gen6_add_request; | |
1627 | ring->flush = gen7_render_ring_flush; | 1631 | ring->flush = gen7_render_ring_flush; | |
1628 | if (INTEL_INFO(dev)->gen == 6) | 1632 | if (INTEL_INFO(dev)->gen == 6) | |
1629 | ring->flush = gen6_render_ring_flush; | 1633 | ring->flush = gen6_render_ring_flush; | |
1630 | ring->irq_get = gen6_ring_get_irq; | 1634 | ring->irq_get = gen6_ring_get_irq; | |
1631 | ring->irq_put = gen6_ring_put_irq; | 1635 | ring->irq_put = gen6_ring_put_irq; | |
1632 | ring->irq_enable_mask = GT_USER_INTERRUPT; | 1636 | ring->irq_enable_mask = GT_USER_INTERRUPT; | |
1633 | ring->get_seqno = gen6_ring_get_seqno; | 1637 | ring->get_seqno = gen6_ring_get_seqno; | |
1634 | ring->sync_to = gen6_ring_sync; | 1638 | ring->sync_to = gen6_ring_sync; | |
1635 | ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; | 1639 | ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; | |
1636 | ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; | 1640 | ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; | |
1637 | ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; | 1641 | ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB; | |
1638 | ring->signal_mbox[0] = GEN6_VRSYNC; | 1642 | ring->signal_mbox[0] = GEN6_VRSYNC; | |
1639 | ring->signal_mbox[1] = GEN6_BRSYNC; | 1643 | ring->signal_mbox[1] = GEN6_BRSYNC; | |
1640 | } else if (IS_GEN5(dev)) { | 1644 | } else if (IS_GEN5(dev)) { | |
1641 | ring->add_request = pc_render_add_request; | 1645 | ring->add_request = pc_render_add_request; | |
1642 | ring->flush = gen4_render_ring_flush; | 1646 | ring->flush = gen4_render_ring_flush; | |
1643 | ring->get_seqno = pc_render_get_seqno; | 1647 | ring->get_seqno = pc_render_get_seqno; | |
1644 | ring->irq_get = gen5_ring_get_irq; | 1648 | ring->irq_get = gen5_ring_get_irq; | |
1645 | ring->irq_put = gen5_ring_put_irq; | 1649 | ring->irq_put = gen5_ring_put_irq; | |
1646 | ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; | 1650 | ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; | |
1647 | } else { | 1651 | } else { | |
1648 | ring->add_request = i9xx_add_request; | 1652 | ring->add_request = i9xx_add_request; | |
1649 | if (INTEL_INFO(dev)->gen < 4) | 1653 | if (INTEL_INFO(dev)->gen < 4) | |
1650 | ring->flush = gen2_render_ring_flush; | 1654 | ring->flush = gen2_render_ring_flush; | |
1651 | else | 1655 | else | |
1652 | ring->flush = gen4_render_ring_flush; | 1656 | ring->flush = gen4_render_ring_flush; | |
1653 | ring->get_seqno = ring_get_seqno; | 1657 | ring->get_seqno = ring_get_seqno; | |
1654 | if (IS_GEN2(dev)) { | 1658 | if (IS_GEN2(dev)) { | |
1655 | ring->irq_get = i8xx_ring_get_irq; | 1659 | ring->irq_get = i8xx_ring_get_irq; | |
1656 | ring->irq_put = i8xx_ring_put_irq; | 1660 | ring->irq_put = i8xx_ring_put_irq; | |
1657 | } else { | 1661 | } else { | |
1658 | ring->irq_get = i9xx_ring_get_irq; | 1662 | ring->irq_get = i9xx_ring_get_irq; | |
1659 | ring->irq_put = i9xx_ring_put_irq; | 1663 | ring->irq_put = i9xx_ring_put_irq; | |
1660 | } | 1664 | } | |
1661 | ring->irq_enable_mask = I915_USER_INTERRUPT; | 1665 | ring->irq_enable_mask = I915_USER_INTERRUPT; | |
1662 | } | 1666 | } | |
1663 | ring->write_tail = ring_write_tail; | 1667 | ring->write_tail = ring_write_tail; | |
1664 | if (IS_HASWELL(dev)) | 1668 | if (IS_HASWELL(dev)) | |
1665 | ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; | 1669 | ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; | |
1666 | else if (INTEL_INFO(dev)->gen >= 6) | 1670 | else if (INTEL_INFO(dev)->gen >= 6) | |
1667 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | 1671 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | |
1668 | else if (INTEL_INFO(dev)->gen >= 4) | 1672 | else if (INTEL_INFO(dev)->gen >= 4) | |
1669 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | 1673 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | |
1670 | else if (IS_I830(dev) || IS_845G(dev)) | 1674 | else if (IS_I830(dev) || IS_845G(dev)) | |
1671 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; | 1675 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; | |
1672 | else | 1676 | else | |
1673 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; | 1677 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; | |
1674 | ring->init = init_render_ring; | 1678 | ring->init = init_render_ring; | |
1675 | ring->cleanup = render_ring_cleanup; | 1679 | ring->cleanup = render_ring_cleanup; | |
1676 | 1680 | |||
1677 | /* Workaround batchbuffer to combat CS tlb bug. */ | 1681 | /* Workaround batchbuffer to combat CS tlb bug. */ | |
1678 | if (HAS_BROKEN_CS_TLB(dev)) { | 1682 | if (HAS_BROKEN_CS_TLB(dev)) { | |
1679 | struct drm_i915_gem_object *obj; | 1683 | struct drm_i915_gem_object *obj; | |
1680 | int ret; | 1684 | int ret; | |
1681 | 1685 | |||
1682 | obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); | 1686 | obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); | |
1683 | if (obj == NULL) { | 1687 | if (obj == NULL) { | |
1684 | DRM_ERROR("Failed to allocate batch bo\n"); | 1688 | DRM_ERROR("Failed to allocate batch bo\n"); | |
1685 | return -ENOMEM; | 1689 | return -ENOMEM; | |
1686 | } | 1690 | } | |
1687 | 1691 | |||
1688 | ret = i915_gem_object_pin(obj, 0, true, false); | 1692 | ret = i915_gem_object_pin(obj, 0, true, false); | |
1689 | if (ret != 0) { | 1693 | if (ret != 0) { | |
1690 | drm_gem_object_unreference(&obj->base); | 1694 | drm_gem_object_unreference(&obj->base); | |
1691 | DRM_ERROR("Failed to ping batch bo\n"); | 1695 | DRM_ERROR("Failed to ping batch bo\n"); | |
1692 | return ret; | 1696 | return ret; | |
1693 | } | 1697 | } | |
1694 | 1698 | |||
1695 | ring->private = obj; | 1699 | ring->private = obj; | |
1696 | } | 1700 | } | |
1697 | 1701 | |||
1698 | return intel_init_ring_buffer(dev, ring); | 1702 | return intel_init_ring_buffer(dev, ring); | |
1699 | } | 1703 | } | |
1700 | 1704 | |||
1701 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | 1705 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) | |
1702 | { | 1706 | { | |
1703 | drm_i915_private_t *dev_priv = dev->dev_private; | 1707 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1704 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | 1708 | struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; | |
1705 | int ret; | 1709 | int ret; | |
1706 | 1710 | |||
1707 | ring->name = "render ring"; | 1711 | ring->name = "render ring"; | |
1708 | ring->id = RCS; | 1712 | ring->id = RCS; | |
1709 | ring->mmio_base = RENDER_RING_BASE; | 1713 | ring->mmio_base = RENDER_RING_BASE; | |
1710 | 1714 | |||
1711 | if (INTEL_INFO(dev)->gen >= 6) { | 1715 | if (INTEL_INFO(dev)->gen >= 6) { | |
1712 | /* non-kms not supported on gen6+ */ | 1716 | /* non-kms not supported on gen6+ */ | |
1713 | return -ENODEV; | 1717 | return -ENODEV; | |
1714 | } | 1718 | } | |
1715 | 1719 | |||
1716 | /* Note: gem is not supported on gen5/ilk without kms (the corresponding | 1720 | /* Note: gem is not supported on gen5/ilk without kms (the corresponding | |
1717 | * gem_init ioctl returns with -ENODEV). Hence we do not need to set up | 1721 | * gem_init ioctl returns with -ENODEV). Hence we do not need to set up | |
1718 | * the special gen5 functions. */ | 1722 | * the special gen5 functions. */ | |
1719 | ring->add_request = i9xx_add_request; | 1723 | ring->add_request = i9xx_add_request; | |
1720 | if (INTEL_INFO(dev)->gen < 4) | 1724 | if (INTEL_INFO(dev)->gen < 4) | |
1721 | ring->flush = gen2_render_ring_flush; | 1725 | ring->flush = gen2_render_ring_flush; | |
1722 | else | 1726 | else | |
1723 | ring->flush = gen4_render_ring_flush; | 1727 | ring->flush = gen4_render_ring_flush; | |
1724 | ring->get_seqno = ring_get_seqno; | 1728 | ring->get_seqno = ring_get_seqno; | |
1725 | if (IS_GEN2(dev)) { | 1729 | if (IS_GEN2(dev)) { | |
1726 | ring->irq_get = i8xx_ring_get_irq; | 1730 | ring->irq_get = i8xx_ring_get_irq; | |
1727 | ring->irq_put = i8xx_ring_put_irq; | 1731 | ring->irq_put = i8xx_ring_put_irq; | |
1728 | } else { | 1732 | } else { | |
1729 | ring->irq_get = i9xx_ring_get_irq; | 1733 | ring->irq_get = i9xx_ring_get_irq; | |
1730 | ring->irq_put = i9xx_ring_put_irq; | 1734 | ring->irq_put = i9xx_ring_put_irq; | |
1731 | } | 1735 | } | |
1732 | ring->irq_enable_mask = I915_USER_INTERRUPT; | 1736 | ring->irq_enable_mask = I915_USER_INTERRUPT; | |
1733 | ring->write_tail = ring_write_tail; | 1737 | ring->write_tail = ring_write_tail; | |
1734 | if (INTEL_INFO(dev)->gen >= 4) | 1738 | if (INTEL_INFO(dev)->gen >= 4) | |
1735 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | 1739 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | |
1736 | else if (IS_I830(dev) || IS_845G(dev)) | 1740 | else if (IS_I830(dev) || IS_845G(dev)) | |
1737 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; | 1741 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; | |
1738 | else | 1742 | else | |
1739 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; | 1743 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; | |
1740 | ring->init = init_render_ring; | 1744 | ring->init = init_render_ring; | |
1741 | ring->cleanup = render_ring_cleanup; | 1745 | ring->cleanup = render_ring_cleanup; | |
1742 | 1746 | |||
1743 | ring->dev = dev; | 1747 | ring->dev = dev; | |
1744 | INIT_LIST_HEAD(&ring->active_list); | 1748 | INIT_LIST_HEAD(&ring->active_list); | |
1745 | INIT_LIST_HEAD(&ring->request_list); | 1749 | INIT_LIST_HEAD(&ring->request_list); | |
1746 | 1750 | |||
1747 | ring->size = size; | 1751 | ring->size = size; | |
1748 | ring->effective_size = ring->size; | 1752 | ring->effective_size = ring->size; | |
1749 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) | 1753 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) | |
1750 | ring->effective_size -= 128; | 1754 | ring->effective_size -= 128; | |
1751 | 1755 | |||
1752 | #ifdef __NetBSD__ | 1756 | #ifdef __NetBSD__ | |
1753 | ring->virtual_start_map.offset = start; | 1757 | ring->virtual_start_map.offset = start; | |
1754 | ring->virtual_start_map.size = size; | 1758 | ring->virtual_start_map.size = size; | |
1755 | ring->virtual_start_map.type = _DRM_REGISTERS; | 1759 | ring->virtual_start_map.type = _DRM_REGISTERS; | |
1756 | ring->virtual_start_map.flags = 0; | 1760 | ring->virtual_start_map.flags = 0; | |
1757 | ring->virtual_start_map.flags |= _DRM_RESTRICTED; | 1761 | ring->virtual_start_map.flags |= _DRM_RESTRICTED; | |
1758 | ring->virtual_start_map.flags |= _DRM_KERNEL; | 1762 | ring->virtual_start_map.flags |= _DRM_KERNEL; | |
1759 | ring->virtual_start_map.flags |= _DRM_WRITE_COMBINING; | 1763 | ring->virtual_start_map.flags |= _DRM_WRITE_COMBINING; | |
1760 | ring->virtual_start_map.flags |= _DRM_DRIVER; | 1764 | ring->virtual_start_map.flags |= _DRM_DRIVER; | |
1761 | ret = drm_ioremap(dev, &ring->virtual_start_map); | 1765 | ret = drm_ioremap(dev, &ring->virtual_start_map); | |
1762 | if (ret) { | 1766 | if (ret) { | |
1763 | DRM_ERROR("cannot ioremap virtual address for ring buffer\n"); | 1767 | DRM_ERROR("cannot ioremap virtual address for ring buffer\n"); | |
1764 | return ret; | 1768 | return ret; | |
1765 | } | 1769 | } | |
1766 | ring->virtual_start_mapped = true; | 1770 | ring->virtual_start_mapped = true; | |
1767 | #else | 1771 | #else | |
1768 | ring->virtual_start = ioremap_wc(start, size); | 1772 | ring->virtual_start = ioremap_wc(start, size); | |
1769 | if (ring->virtual_start == NULL) { | 1773 | if (ring->virtual_start == NULL) { | |
1770 | DRM_ERROR("can not ioremap virtual address for" | 1774 | DRM_ERROR("can not ioremap virtual address for" | |
1771 | " ring buffer\n"); | 1775 | " ring buffer\n"); | |
1772 | return -ENOMEM; | 1776 | return -ENOMEM; | |
1773 | } | 1777 | } | |
1774 | #endif | 1778 | #endif | |
1775 | 1779 | |||
1776 | if (!I915_NEED_GFX_HWS(dev)) { | 1780 | if (!I915_NEED_GFX_HWS(dev)) { | |
1777 | ret = init_phys_hws_pga(ring); | 1781 | ret = init_phys_hws_pga(ring); | |
1778 | if (ret) | 1782 | if (ret) | |
1779 | return ret; | 1783 | return ret; | |
1780 | } | 1784 | } | |
1781 | 1785 | |||
1782 | return 0; | 1786 | return 0; | |
1783 | } | 1787 | } | |
1784 | 1788 | |||
1785 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | 1789 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | |
1786 | { | 1790 | { | |
1787 | drm_i915_private_t *dev_priv = dev->dev_private; | 1791 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1788 | struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; | 1792 | struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; | |
1789 | 1793 | |||
1790 | ring->name = "bsd ring"; | 1794 | ring->name = "bsd ring"; | |
1791 | ring->id = VCS; | 1795 | ring->id = VCS; | |
1792 | 1796 | |||
1793 | ring->write_tail = ring_write_tail; | 1797 | ring->write_tail = ring_write_tail; | |
1794 | if (IS_GEN6(dev) || IS_GEN7(dev)) { | 1798 | if (IS_GEN6(dev) || IS_GEN7(dev)) { | |
1795 | ring->mmio_base = GEN6_BSD_RING_BASE; | 1799 | ring->mmio_base = GEN6_BSD_RING_BASE; | |
1796 | /* gen6 bsd needs a special wa for tail updates */ | 1800 | /* gen6 bsd needs a special wa for tail updates */ | |
1797 | if (IS_GEN6(dev)) | 1801 | if (IS_GEN6(dev)) | |
1798 | ring->write_tail = gen6_bsd_ring_write_tail; | 1802 | ring->write_tail = gen6_bsd_ring_write_tail; | |
1799 | ring->flush = gen6_ring_flush; | 1803 | ring->flush = gen6_ring_flush; | |
1800 | ring->add_request = gen6_add_request; | 1804 | ring->add_request = gen6_add_request; | |
1801 | ring->get_seqno = gen6_ring_get_seqno; | 1805 | ring->get_seqno = gen6_ring_get_seqno; | |
1802 | ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; | 1806 | ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; | |
1803 | ring->irq_get = gen6_ring_get_irq; | 1807 | ring->irq_get = gen6_ring_get_irq; | |
1804 | ring->irq_put = gen6_ring_put_irq; | 1808 | ring->irq_put = gen6_ring_put_irq; | |
1805 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | 1809 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | |
1806 | ring->sync_to = gen6_ring_sync; | 1810 | ring->sync_to = gen6_ring_sync; | |
1807 | ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; | 1811 | ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR; | |
1808 | ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; | 1812 | ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID; | |
1809 | ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; | 1813 | ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB; | |
1810 | ring->signal_mbox[0] = GEN6_RVSYNC; | 1814 | ring->signal_mbox[0] = GEN6_RVSYNC; | |
1811 | ring->signal_mbox[1] = GEN6_BVSYNC; | 1815 | ring->signal_mbox[1] = GEN6_BVSYNC; | |
1812 | } else { | 1816 | } else { | |
1813 | ring->mmio_base = BSD_RING_BASE; | 1817 | ring->mmio_base = BSD_RING_BASE; | |
1814 | ring->flush = bsd_ring_flush; | 1818 | ring->flush = bsd_ring_flush; | |
1815 | ring->add_request = i9xx_add_request; | 1819 | ring->add_request = i9xx_add_request; | |
1816 | ring->get_seqno = ring_get_seqno; | 1820 | ring->get_seqno = ring_get_seqno; | |
1817 | if (IS_GEN5(dev)) { | 1821 | if (IS_GEN5(dev)) { | |
1818 | ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; | 1822 | ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; | |
1819 | ring->irq_get = gen5_ring_get_irq; | 1823 | ring->irq_get = gen5_ring_get_irq; | |
1820 | ring->irq_put = gen5_ring_put_irq; | 1824 | ring->irq_put = gen5_ring_put_irq; | |
1821 | } else { | 1825 | } else { | |
1822 | ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; | 1826 | ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; | |
1823 | ring->irq_get = i9xx_ring_get_irq; | 1827 | ring->irq_get = i9xx_ring_get_irq; | |
1824 | ring->irq_put = i9xx_ring_put_irq; | 1828 | ring->irq_put = i9xx_ring_put_irq; | |
1825 | } | 1829 | } | |
1826 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | 1830 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | |
1827 | } | 1831 | } | |
1828 | ring->init = init_ring_common; | 1832 | ring->init = init_ring_common; | |
1829 | 1833 | |||
1830 | return intel_init_ring_buffer(dev, ring); | 1834 | return intel_init_ring_buffer(dev, ring); | |
1831 | } | 1835 | } | |
1832 | 1836 | |||
1833 | int intel_init_blt_ring_buffer(struct drm_device *dev) | 1837 | int intel_init_blt_ring_buffer(struct drm_device *dev) | |
1834 | { | 1838 | { | |
1835 | drm_i915_private_t *dev_priv = dev->dev_private; | 1839 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1836 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | 1840 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | |
1837 | 1841 | |||
1838 | ring->name = "blitter ring"; | 1842 | ring->name = "blitter ring"; | |
1839 | ring->id = BCS; | 1843 | ring->id = BCS; | |
1840 | 1844 | |||
1841 | ring->mmio_base = BLT_RING_BASE; | 1845 | ring->mmio_base = BLT_RING_BASE; | |
1842 | ring->write_tail = ring_write_tail; | 1846 | ring->write_tail = ring_write_tail; | |
1843 | ring->flush = blt_ring_flush; | 1847 | ring->flush = blt_ring_flush; | |
1844 | ring->add_request = gen6_add_request; | 1848 | ring->add_request = gen6_add_request; | |
1845 | ring->get_seqno = gen6_ring_get_seqno; | 1849 | ring->get_seqno = gen6_ring_get_seqno; | |
1846 | ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; | 1850 | ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; | |
1847 | ring->irq_get = gen6_ring_get_irq; | 1851 | ring->irq_get = gen6_ring_get_irq; | |
1848 | ring->irq_put = gen6_ring_put_irq; | 1852 | ring->irq_put = gen6_ring_put_irq; | |
1849 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | 1853 | ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; | |
1850 | ring->sync_to = gen6_ring_sync; | 1854 | ring->sync_to = gen6_ring_sync; | |
1851 | ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; | 1855 | ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR; | |
1852 | ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; | 1856 | ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV; | |
1853 | ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; | 1857 | ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID; | |
1854 | ring->signal_mbox[0] = GEN6_RBSYNC; | 1858 | ring->signal_mbox[0] = GEN6_RBSYNC; | |
1855 | ring->signal_mbox[1] = GEN6_VBSYNC; | 1859 | ring->signal_mbox[1] = GEN6_VBSYNC; | |
1856 | ring->init = init_ring_common; | 1860 | ring->init = init_ring_common; | |
1857 | 1861 | |||
1858 | return intel_init_ring_buffer(dev, ring); | 1862 | return intel_init_ring_buffer(dev, ring); | |
1859 | } | 1863 | } | |
1860 | 1864 | |||
1861 | int | 1865 | int | |
1862 | intel_ring_flush_all_caches(struct intel_ring_buffer *ring) | 1866 | intel_ring_flush_all_caches(struct intel_ring_buffer *ring) | |
1863 | { | 1867 | { | |
1864 | int ret; | 1868 | int ret; | |
1865 | 1869 | |||
1866 | if (!ring->gpu_caches_dirty) | 1870 | if (!ring->gpu_caches_dirty) | |
1867 | return 0; | 1871 | return 0; | |
1868 | 1872 | |||
1869 | ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); | 1873 | ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); | |
1870 | if (ret) | 1874 | if (ret) | |
1871 | return ret; | 1875 | return ret; | |
1872 | 1876 | |||
1873 | trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); | 1877 | trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); | |
1874 | 1878 | |||
1875 | ring->gpu_caches_dirty = false; | 1879 | ring->gpu_caches_dirty = false; | |
1876 | return 0; | 1880 | return 0; | |
1877 | } | 1881 | } | |
1878 | 1882 | |||
1879 | int | 1883 | int | |
1880 | intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) | 1884 | intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) | |
1881 | { | 1885 | { | |
1882 | uint32_t flush_domains; | 1886 | uint32_t flush_domains; | |
1883 | int ret; | 1887 | int ret; | |
1884 | 1888 | |||
1885 | flush_domains = 0; | 1889 | flush_domains = 0; | |
1886 | if (ring->gpu_caches_dirty) | 1890 | if (ring->gpu_caches_dirty) | |
1887 | flush_domains = I915_GEM_GPU_DOMAINS; | 1891 | flush_domains = I915_GEM_GPU_DOMAINS; | |
1888 | 1892 | |||
1889 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | 1893 | ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | |
1890 | if (ret) | 1894 | if (ret) | |
1891 | return ret; | 1895 | return ret; | |
1892 | 1896 | |||
1893 | trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | 1897 | trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); | |
1894 | 1898 | |||
1895 | ring->gpu_caches_dirty = false; | 1899 | ring->gpu_caches_dirty = false; | |
1896 | return 0; | 1900 | return 0; | |
1897 | } | 1901 | } |
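
These two helpers are meant to bracket batch execution: invalidate before a batch so the GPU does not read stale data, flush afterwards so its writes become visible, with gpu_caches_dirty letting redundant flushes collapse between back-to-back batches. A sketch of the pairing; the wrapper function and its call sequence are illustrative assumptions, not part of this diff:

	/* Sketch only: how the two cache helpers above bracket a batch.
	 * run_batch_sketch() is a hypothetical caller. */
	static int run_batch_sketch(struct intel_ring_buffer *ring,
	    u32 offset, u32 len)
	{
		int ret;

		/* Before dispatch: drop any stale GPU cache contents. */
		ret = intel_ring_invalidate_all_caches(ring);
		if (ret)
			return ret;

		ret = ring->dispatch_execbuffer(ring, offset, len, 0);
		if (ret)
			return ret;

		/* The batch's writes dirty the caches again; mark them so
		 * the next flush (or invalidate) actually emits one. */
		ring->gpu_caches_dirty = true;
		return intel_ring_flush_all_caches(ring);
	}
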
--- src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.h 2013/07/24 03:05:41 1.1.1.1.2.5
+++ src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.h 2013/07/24 03:06:00 1.1.1.1.2.6
@@ -1,253 +1,257 @@ | @@ -1,253 +1,257 @@ | |||
1 | #ifndef _INTEL_RINGBUFFER_H_ | 1 | #ifndef _INTEL_RINGBUFFER_H_ | |
2 | #define _INTEL_RINGBUFFER_H_ | 2 | #define _INTEL_RINGBUFFER_H_ | |
3 | 3 | |||
4 | #include <asm/bug.h> | 4 | #include <asm/bug.h> | |
5 | 5 | |||
6 | /* | 6 | /* | |
7 | * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" | 7 | * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" | |
8 | * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" | 8 | * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" | |
9 | * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use" | 9 | * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use" | |
10 | * | 10 | * | |
11 | * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same | 11 | * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same | |
12 | * cacheline, the Head Pointer must not be greater than the Tail | 12 | * cacheline, the Head Pointer must not be greater than the Tail | |
13 | * Pointer." | 13 | * Pointer." | |
14 | */ | 14 | */ | |
15 | #define I915_RING_FREE_SPACE 64 | 15 | #define I915_RING_FREE_SPACE 64 | |
16 | 16 | |||
17 | struct intel_hw_status_page { | 17 | struct intel_hw_status_page { | |
18 | u32 *page_addr; | 18 | u32 *page_addr; | |
19 | unsigned int gfx_addr; | 19 | unsigned int gfx_addr; | |
20 | struct drm_i915_gem_object *obj; | 20 | struct drm_i915_gem_object *obj; | |
21 | }; | 21 | }; | |
22 | 22 | |||
23 | #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) | 23 | #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) | |
24 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | 24 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | |
25 | 25 | |||
26 | #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) | 26 | #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) | |
27 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | 27 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | |
28 | 28 | |||
29 | #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) | 29 | #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) | |
30 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | 30 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | |
31 | 31 | |||
32 | #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) | 32 | #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) | |
33 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | 33 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | |
34 | 34 | |||
35 | #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) | 35 | #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) | |
36 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | 36 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | |
37 | 37 | |||
38 | #define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) | 38 | #define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) | |
39 | #define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) | 39 | #define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) | |
40 | #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) | 40 | #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) | |
41 | 41 | |||
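
Each of these macros resolves a generic ring register through this ring's mmio_base, so one callback body serves all three rings. A sketch matching the shape of the default write_tail hook (ring_write_tail() in intel_ringbuffer.c):

	static void
	example_write_tail(struct intel_ring_buffer *ring, u32 value)
	{
		/* I915_WRITE (used by I915_WRITE_TAIL) expects a local
		 * dev_priv in scope. */
		drm_i915_private_t *dev_priv = ring->dev->dev_private;

		I915_WRITE_TAIL(ring, value);
	}
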
42 | struct intel_ring_buffer { | 42 | struct intel_ring_buffer { | |
43 | const char *name; | 43 | const char *name; | |
44 | enum intel_ring_id { | 44 | enum intel_ring_id { | |
45 | RCS = 0x0, | 45 | RCS = 0x0, | |
46 | VCS, | 46 | VCS, | |
47 | BCS, | 47 | BCS, | |
48 | } id; | 48 | } id; | |
49 | #define I915_NUM_RINGS 3 | 49 | #define I915_NUM_RINGS 3 | |
50 | u32 mmio_base; | 50 | u32 mmio_base; | |
51 | #ifdef __NetBSD__ | 51 | #ifdef __NetBSD__ | |
52 | struct drm_local_map virtual_start_map; | 52 | struct drm_local_map virtual_start_map; | |
53 | bool virtual_start_mapped; | 53 | bool virtual_start_mapped; | |
54 | #else | 54 | #else | |
55 | void __iomem *virtual_start; | 55 | void __iomem *virtual_start; | |
56 | #endif | 56 | #endif | |
57 | struct drm_device *dev; | 57 | struct drm_device *dev; | |
58 | struct drm_i915_gem_object *obj; | 58 | struct drm_i915_gem_object *obj; | |
59 | 59 | |||
60 | u32 head; | 60 | u32 head; | |
61 | u32 tail; | 61 | u32 tail; | |
62 | int space; | 62 | int space; | |
63 | int size; | 63 | int size; | |
64 | int effective_size; | 64 | int effective_size; | |
65 | struct intel_hw_status_page status_page; | 65 | struct intel_hw_status_page status_page; | |
66 | 66 | |||
67 | /** We track the position of the requests in the ring buffer, and | 67 | /** We track the position of the requests in the ring buffer, and | |
68 | * when each is retired we increment last_retired_head as the GPU | 68 | * when each is retired we increment last_retired_head as the GPU | |
69 | * must have finished processing the request and so we know we | 69 | * must have finished processing the request and so we know we | |
70 | * can advance the ringbuffer up to that position. | 70 | * can advance the ringbuffer up to that position. | |
71 | * | 71 | * | |
72 | * last_retired_head is set to -1 after the value is consumed so | 72 | * last_retired_head is set to -1 after the value is consumed so | |
73 | * we can detect new retirements. | 73 | * we can detect new retirements. | |
74 | */ | 74 | */ | |
75 | u32 last_retired_head; | 75 | u32 last_retired_head; | |
76 | 76 | |||
77 | u32 irq_refcount; /* protected by dev_priv->irq_lock */ | 77 | u32 irq_refcount; /* protected by dev_priv->irq_lock */ | |
78 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ | 78 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ | |
79 | u32 trace_irq_seqno; | 79 | u32 trace_irq_seqno; | |
80 | u32 sync_seqno[I915_NUM_RINGS-1]; | 80 | u32 sync_seqno[I915_NUM_RINGS-1]; | |
81 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); | 81 | bool __must_check (*irq_get)(struct intel_ring_buffer *ring); | |
82 | void (*irq_put)(struct intel_ring_buffer *ring); | 82 | void (*irq_put)(struct intel_ring_buffer *ring); | |
83 | 83 | |||
84 | int (*init)(struct intel_ring_buffer *ring); | 84 | int (*init)(struct intel_ring_buffer *ring); | |
85 | 85 | |||
86 | void (*write_tail)(struct intel_ring_buffer *ring, | 86 | void (*write_tail)(struct intel_ring_buffer *ring, | |
87 | u32 value); | 87 | u32 value); | |
88 | int __must_check (*flush)(struct intel_ring_buffer *ring, | 88 | int __must_check (*flush)(struct intel_ring_buffer *ring, | |
89 | u32 invalidate_domains, | 89 | u32 invalidate_domains, | |
90 | u32 flush_domains); | 90 | u32 flush_domains); | |
91 | int (*add_request)(struct intel_ring_buffer *ring); | 91 | int (*add_request)(struct intel_ring_buffer *ring); | |
92 | /* Some chipsets are not quite as coherent as advertised and need | 92 | /* Some chipsets are not quite as coherent as advertised and need | |
93 | * an expensive kick to force a true read of the up-to-date seqno. | 93 | * an expensive kick to force a true read of the up-to-date seqno. | |
94 | * However, the up-to-date seqno is not always required and the last | 94 | * However, the up-to-date seqno is not always required and the last | |
95 | * seen value is good enough. Note that the seqno will always be | 95 | * seen value is good enough. Note that the seqno will always be | |
96 | * monotonic, even if not coherent. | 96 | * monotonic, even if not coherent. | |
97 | */ | 97 | */ | |
98 | u32 (*get_seqno)(struct intel_ring_buffer *ring, | 98 | u32 (*get_seqno)(struct intel_ring_buffer *ring, | |
99 | bool lazy_coherency); | 99 | bool lazy_coherency); | |
100 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, | 100 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, | |
101 | u32 offset, u32 length, | 101 | u32 offset, u32 length, | |
102 | unsigned flags); | 102 | unsigned flags); | |
103 | #define I915_DISPATCH_SECURE 0x1 | 103 | #define I915_DISPATCH_SECURE 0x1 | |
104 | #define I915_DISPATCH_PINNED 0x2 | 104 | #define I915_DISPATCH_PINNED 0x2 | |
105 | void (*cleanup)(struct intel_ring_buffer *ring); | 105 | void (*cleanup)(struct intel_ring_buffer *ring); | |
106 | int (*sync_to)(struct intel_ring_buffer *ring, | 106 | int (*sync_to)(struct intel_ring_buffer *ring, | |
107 | struct intel_ring_buffer *to, | 107 | struct intel_ring_buffer *to, | |
108 | u32 seqno); | 108 | u32 seqno); | |
109 | 109 | |||
110 | u32 semaphore_register[3]; /* our mbox written by others */ | 110 | u32 semaphore_register[3]; /* our mbox written by others */ | |
111 | u32 signal_mbox[2]; /* mboxes this ring signals to */ | 111 | u32 signal_mbox[2]; /* mboxes this ring signals to */ | |
112 | /** | 112 | /** | |
113 | * List of objects currently involved in rendering from the | 113 | * List of objects currently involved in rendering from the | |
114 | * ringbuffer. | 114 | * ringbuffer. | |
115 | * | 115 | * | |
116 | * Includes buffers having the contents of their GPU caches | 116 | * Includes buffers having the contents of their GPU caches | |
117 | * flushed, not necessarily primitives. last_rendering_seqno | 117 | * flushed, not necessarily primitives. last_rendering_seqno | |
118 | * represents when the rendering involved will be completed. | 118 | * represents when the rendering involved will be completed. | |
119 | * | 119 | * | |
120 | * A reference is held on the buffer while on this list. | 120 | * A reference is held on the buffer while on this list. | |
121 | */ | 121 | */ | |
122 | struct list_head active_list; | 122 | struct list_head active_list; | |
123 | 123 | |||
124 | /** | 124 | /** | |
125 | * List of breadcrumbs associated with GPU requests currently | 125 | * List of breadcrumbs associated with GPU requests currently | |
126 | * outstanding. | 126 | * outstanding. | |
127 | */ | 127 | */ | |
128 | struct list_head request_list; | 128 | struct list_head request_list; | |
129 | 129 | |||
130 | /** | 130 | /** | |
131 | * Do we have some not yet emitted requests outstanding? | 131 | * Do we have some not yet emitted requests outstanding? | |
132 | */ | 132 | */ | |
133 | u32 outstanding_lazy_request; | 133 | u32 outstanding_lazy_request; | |
134 | bool gpu_caches_dirty; | 134 | bool gpu_caches_dirty; | |
135 | 135 | |||
136 | #ifdef __NetBSD__ | |||
137 | drm_waitqueue_t irq_queue; | |||
138 | #else | |||
136 | wait_queue_head_t irq_queue; | 139 | wait_queue_head_t irq_queue; | |
140 | #endif | |||
137 | 141 | |||
138 | /** | 142 | /** | |
139 | * Do an explicit TLB flush before MI_SET_CONTEXT | 143 | * Do an explicit TLB flush before MI_SET_CONTEXT | |
140 | */ | 144 | */ | |
141 | bool itlb_before_ctx_switch; | 145 | bool itlb_before_ctx_switch; | |
142 | struct i915_hw_context *default_context; | 146 | struct i915_hw_context *default_context; | |
143 | struct drm_i915_gem_object *last_context_obj; | 147 | struct drm_i915_gem_object *last_context_obj; | |
144 | 148 | |||
145 | void *private; | 149 | void *private; | |
146 | }; | 150 | }; | |
147 | 151 | |||
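
The #ifdef __NetBSD__ block around irq_queue is the point of this commit: a Linux wait_queue_head_t becomes a drm_waitqueue_t (a condition variable in the NetBSD drm2 compatibility layer), so waiters can sleep under dev_priv->irq_lock and the interrupt handler can wake them while holding the same lock. A hedged sketch of the pairing, assuming the DRM_SPIN_WAIT_UNTIL / DRM_SPIN_WAKEUP_ALL macros from the branch's drm_wait_netbsd.h (their exact names and argument order are an assumption here, and the lock/IRQ discipline is simplified):

	#ifdef __NetBSD__
	static int
	example_wait_for_seqno(struct intel_ring_buffer *ring, u32 seqno)
	{
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		int ret;

		spin_lock(&dev_priv->irq_lock);
		/* Sleep on the drm waitqueue until the breadcrumb passes. */
		DRM_SPIN_WAIT_UNTIL(ret, &ring->irq_queue, &dev_priv->irq_lock,
		    i915_seqno_passed(ring->get_seqno(ring, false), seqno));
		spin_unlock(&dev_priv->irq_lock);

		return ret;
	}
	#endif

The interrupt side would then wake all waiters with DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock) where Linux calls wake_up_all(&ring->irq_queue).
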
148 | static inline bool | 152 | static inline bool | |
149 | intel_ring_initialized(struct intel_ring_buffer *ring) | 153 | intel_ring_initialized(struct intel_ring_buffer *ring) | |
150 | { | 154 | { | |
151 | return ring->obj != NULL; | 155 | return ring->obj != NULL; | |
152 | } | 156 | } | |
153 | 157 | |||
154 | static inline unsigned | 158 | static inline unsigned | |
155 | intel_ring_flag(struct intel_ring_buffer *ring) | 159 | intel_ring_flag(struct intel_ring_buffer *ring) | |
156 | { | 160 | { | |
157 | return 1 << ring->id; | 161 | return 1 << ring->id; | |
158 | } | 162 | } | |
159 | 163 | |||
160 | static inline u32 | 164 | static inline u32 | |
161 | intel_ring_sync_index(struct intel_ring_buffer *ring, | 165 | intel_ring_sync_index(struct intel_ring_buffer *ring, | |
162 | struct intel_ring_buffer *other) | 166 | struct intel_ring_buffer *other) | |
163 | { | 167 | { | |
164 | int idx; | 168 | int idx; | |
165 | 169 | |||
166 | /* | 170 | /* | |
167 | * cs -> 0 = vcs, 1 = bcs | 171 | * cs -> 0 = vcs, 1 = bcs | |
168 | * vcs -> 0 = bcs, 1 = cs, | 172 | * vcs -> 0 = bcs, 1 = cs, | |
169 | * bcs -> 0 = cs, 1 = vcs. | 173 | * bcs -> 0 = cs, 1 = vcs. | |
170 | */ | 174 | */ | |
171 | 175 | |||
172 | idx = (other - ring) - 1; | 176 | idx = (other - ring) - 1; | |
173 | if (idx < 0) | 177 | if (idx < 0) | |
174 | idx += I915_NUM_RINGS; | 178 | idx += I915_NUM_RINGS; | |
175 | 179 | |||
176 | return idx; | 180 | return idx; | |
177 | } | 181 | } | |
178 | 182 | |||
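
The pointer subtraction in intel_ring_sync_index() works because the rings live in a contiguous array, so other - ring is a difference of ring IDs; the -1 and the wrap-around then renumber the other two rings from this ring's point of view. A compile-time sketch of the same arithmetic on the enum values (BUILD_BUG_ON is the Linux compile-time assert, assumed available via the compat headers on NetBSD):

	static inline void
	example_check_sync_index(void)
	{
		/* vcs -> bcs: (2 - 1) - 1 == 0, the "0 = bcs" row. */
		BUILD_BUG_ON((((BCS - VCS) - 1 + I915_NUM_RINGS)
			% I915_NUM_RINGS) != 0);
		/* vcs -> cs: (0 - 1) - 1 == -2, wraps to 1, "1 = cs". */
		BUILD_BUG_ON((((RCS - VCS) - 1 + I915_NUM_RINGS)
			% I915_NUM_RINGS) != 1);
	}
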
179 | static inline u32 | 183 | static inline u32 | |
180 | intel_read_status_page(struct intel_ring_buffer *ring, | 184 | intel_read_status_page(struct intel_ring_buffer *ring, | |
181 | int reg) | 185 | int reg) | |
182 | { | 186 | { | |
183 | /* Ensure that the compiler doesn't optimize away the load. */ | 187 | /* Ensure that the compiler doesn't optimize away the load. */ | |
184 | barrier(); | 188 | barrier(); | |
185 | return ring->status_page.page_addr[reg]; | 189 | return ring->status_page.page_addr[reg]; | |
186 | } | 190 | } | |
187 | 191 | |||
188 | /** | 192 | /** | |
189 | * Reads a dword out of the status page, which is written to from the command | 193 | * Reads a dword out of the status page, which is written to from the command | |
190 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or | 194 | * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or | |
191 | * MI_STORE_DATA_IMM. | 195 | * MI_STORE_DATA_IMM. | |
192 | * | 196 | * | |
193 | * The following dwords have a reserved meaning: | 197 | * The following dwords have a reserved meaning: | |
194 | * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. | 198 | * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. | |
195 | * 0x04: ring 0 head pointer | 199 | * 0x04: ring 0 head pointer | |
196 | * 0x05: ring 1 head pointer (915-class) | 200 | * 0x05: ring 1 head pointer (915-class) | |
197 | * 0x06: ring 2 head pointer (915-class) | 201 | * 0x06: ring 2 head pointer (915-class) | |
198 | * 0x10-0x1b: Context status DWords (GM45) | 202 | * 0x10-0x1b: Context status DWords (GM45) | |
199 | * 0x1f: Last written status offset. (GM45) | 203 | * 0x1f: Last written status offset. (GM45) | |
200 | * | 204 | * | |
201 | * The area from dword 0x20 to 0x3ff is available for driver usage. | 205 | * The area from dword 0x20 to 0x3ff is available for driver usage. | |
202 | */ | 206 | */ | |
203 | #define I915_GEM_HWS_INDEX 0x20 | 207 | #define I915_GEM_HWS_INDEX 0x20 | |
204 | #define I915_GEM_HWS_SCRATCH_INDEX 0x30 | 208 | #define I915_GEM_HWS_SCRATCH_INDEX 0x30 | |
205 | #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) | 209 | #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) | |
206 | 210 | |||
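
The seqno breadcrumb each request writes via MI_STORE_DWORD_INDEX lands at I915_GEM_HWS_INDEX in the driver-usable region, and the get_seqno callbacks read it back through intel_read_status_page(). A sketch matching the shape of ring_get_seqno() in intel_ringbuffer.c:

	static u32
	example_ring_get_seqno(struct intel_ring_buffer *ring,
	    bool lazy_coherency)
	{
		/* The real callbacks may first kick the hardware when
		 * !lazy_coherency; elided here. */
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
	}
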
207 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); | 211 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); | |
208 | 212 | |||
209 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); | 213 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); | |
210 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, | 214 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, | |
211 | u32 data) | 215 | u32 data) | |
212 | { | 216 | { | |
213 | #ifdef __NetBSD__ | 217 | #ifdef __NetBSD__ | |
214 | DRM_WRITE32(&ring->virtual_start_map, ring->tail, data); | 218 | DRM_WRITE32(&ring->virtual_start_map, ring->tail, data); | |
215 | #else | 219 | #else | |
216 | iowrite32(data, ring->virtual_start + ring->tail); | 220 | iowrite32(data, ring->virtual_start + ring->tail); | |
217 | #endif | 221 | #endif | |
218 | ring->tail += 4; | 222 | ring->tail += 4; | |
219 | } | 223 | } | |
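
intel_ring_emit() is always bracketed by intel_ring_begin(), which reserves the dwords (so neither the NetBSD DRM_WRITE32 path nor the Linux iowrite32 path can run past the ring), and intel_ring_advance(), which publishes the new tail. A minimal sketch of the idiom, assuming MI_FLUSH and MI_NOOP from i915_reg.h:

	static int
	example_emit_flush(struct intel_ring_buffer *ring)
	{
		int ret;

		/* Reserve space for two dwords before writing any. */
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);	/* keep the tail qword-aligned */
		intel_ring_advance(ring);	/* hand the new tail to the hw */

		return 0;
	}
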
220 | void intel_ring_advance(struct intel_ring_buffer *ring); | 224 | void intel_ring_advance(struct intel_ring_buffer *ring); | |
221 | int __must_check intel_ring_idle(struct intel_ring_buffer *ring); | 225 | int __must_check intel_ring_idle(struct intel_ring_buffer *ring); | |
222 | 226 | |||
223 | int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); | 227 | int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); | |
224 | int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); | 228 | int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); | |
225 | 229 | |||
226 | int intel_init_render_ring_buffer(struct drm_device *dev); | 230 | int intel_init_render_ring_buffer(struct drm_device *dev); | |
227 | int intel_init_bsd_ring_buffer(struct drm_device *dev); | 231 | int intel_init_bsd_ring_buffer(struct drm_device *dev); | |
228 | int intel_init_blt_ring_buffer(struct drm_device *dev); | 232 | int intel_init_blt_ring_buffer(struct drm_device *dev); | |
229 | 233 | |||
230 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); | 234 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); | |
231 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); | 235 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); | |
232 | 236 | |||
233 | static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) | 237 | static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) | |
234 | { | 238 | { | |
235 | return ring->tail; | 239 | return ring->tail; | |
236 | } | 240 | } | |
237 | 241 | |||
238 | static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) | 242 | static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring) | |
239 | { | 243 | { | |
240 | BUG_ON(ring->outstanding_lazy_request == 0); | 244 | BUG_ON(ring->outstanding_lazy_request == 0); | |
241 | return ring->outstanding_lazy_request; | 245 | return ring->outstanding_lazy_request; | |
242 | } | 246 | } | |
243 | 247 | |||
244 | static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) | 248 | static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) | |
245 | { | 249 | { | |
246 | if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) | 250 | if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) | |
247 | ring->trace_irq_seqno = seqno; | 251 | ring->trace_irq_seqno = seqno; | |
248 | } | 252 | } | |
249 | 253 | |||
250 | /* DRI warts */ | 254 | /* DRI warts */ | |
251 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); | 255 | int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); | |
252 | 256 | |||
253 | #endif /* _INTEL_RINGBUFFER_H_ */ | 257 | #endif /* _INTEL_RINGBUFFER_H_ */ |
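
The header only declares irq_queue; the companion hunks of this commit in intel_ringbuffer.c must create and destroy it where Linux would call init_waitqueue_head(). A hedged sketch of that lifecycle, assuming DRM_INIT_WAITQUEUE / DRM_DESTROY_WAITQUEUE from drm_wait_netbsd.h (the wchan name is illustrative):

	#ifdef __NetBSD__
	/* At ring setup, where Linux does init_waitqueue_head(). */
	static void
	example_ring_waitqueue_init(struct intel_ring_buffer *ring)
	{
		DRM_INIT_WAITQUEUE(&ring->irq_queue, "i915irq");
	}

	/* At teardown; drm waitqueues, unlike Linux ones, need
	 * explicit destruction. */
	static void
	example_ring_waitqueue_fini(struct intel_ring_buffer *ring)
	{
		DRM_DESTROY_WAITQUEUE(&ring->irq_queue);
	}
	#endif
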