Wed Jul 24 02:59:46 2013 UTC ()
Switch intel_ring_buffers from Linux ioremap to drm_ioremap.


(riastradh)
diff -r1.1.1.1.2.2 -r1.1.1.1.2.3 src/sys/external/bsd/drm2/dist/drm/i915/intel_ringbuffer.c
diff -r1.1.1.1.2.3 -r1.1.1.1.2.4 src/sys/external/bsd/drm2/dist/drm/i915/intel_ringbuffer.h

cvs diff -r1.1.1.1.2.2 -r1.1.1.1.2.3 src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.c (expand / switch to unified diff)

--- src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.c 2013/07/23 21:28:22 1.1.1.1.2.2
+++ src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.c 2013/07/24 02:59:46 1.1.1.1.2.3
@@ -1171,51 +1171,71 @@ static int intel_init_ring_buffer(struct
1171 goto err_hws; 1171 goto err_hws;
1172 } 1172 }
1173 1173
1174 ring->obj = obj; 1174 ring->obj = obj;
1175 1175
1176 ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); 1176 ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
1177 if (ret) 1177 if (ret)
1178 goto err_unref; 1178 goto err_unref;
1179 1179
1180 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1180 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1181 if (ret) 1181 if (ret)
1182 goto err_unpin; 1182 goto err_unpin;
1183 1183
 1184#ifdef __NetBSD__
 1185 ring->virtual_start_map.offset = (dev_priv->mm.gtt->gma_bus_addr +
 1186 obj->gtt_offset);
 1187 ring->virtual_start_map.size = ring->size;
 1188 ring->virtual_start_map.flags = 0;
 1189 ring->virtual_start_map.flags |= _DRM_RESTRICTED;
 1190 ring->virtual_start_map.flags |= _DRM_KERNEL;
 1191 ring->virtual_start_map.flags |= _DRM_WRITE_COMBINING;
 1192 ring->virtual_start_map.flags |= _DRM_DRIVER;
 1193 ret = drm_ioremap(dev, &ring->virtual_start_map);
 1194 if (ret) {
 1195 DRM_ERROR("failed to map ring buffer\n");
 1196 goto err_unpin;
 1197 }
 1198#else
1184 ring->virtual_start = 1199 ring->virtual_start =
1185 ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, 1200 ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
1186 ring->size); 1201 ring->size);
1187 if (ring->virtual_start == NULL) { 1202 if (ring->virtual_start == NULL) {
1188 DRM_ERROR("Failed to map ringbuffer.\n"); 1203 DRM_ERROR("Failed to map ringbuffer.\n");
1189 ret = -EINVAL; 1204 ret = -EINVAL;
1190 goto err_unpin; 1205 goto err_unpin;
1191 } 1206 }
 1207#endif
1192 1208
1193 ret = ring->init(ring); 1209 ret = ring->init(ring);
1194 if (ret) 1210 if (ret)
1195 goto err_unmap; 1211 goto err_unmap;
1196 1212
1197 /* Workaround an erratum on the i830 which causes a hang if 1213 /* Workaround an erratum on the i830 which causes a hang if
1198 * the TAIL pointer points to within the last 2 cachelines 1214 * the TAIL pointer points to within the last 2 cachelines
1199 * of the buffer. 1215 * of the buffer.
1200 */ 1216 */
1201 ring->effective_size = ring->size; 1217 ring->effective_size = ring->size;
1202 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 1218 if (IS_I830(ring->dev) || IS_845G(ring->dev))
1203 ring->effective_size -= 128; 1219 ring->effective_size -= 128;
1204 1220
1205 return 0; 1221 return 0;
1206 1222
1207err_unmap: 1223err_unmap:
 1224#ifdef __NetBSD__
 1225 drm_iounmap(dev, &ring->virtual_start_map);
 1226#else
1208 iounmap(ring->virtual_start); 1227 iounmap(ring->virtual_start);
 1228#endif
1209err_unpin: 1229err_unpin:
1210 i915_gem_object_unpin(obj); 1230 i915_gem_object_unpin(obj);
1211err_unref: 1231err_unref:
1212 drm_gem_object_unreference(&obj->base); 1232 drm_gem_object_unreference(&obj->base);
1213 ring->obj = NULL; 1233 ring->obj = NULL;
1214err_hws: 1234err_hws:
1215 cleanup_status_page(ring); 1235 cleanup_status_page(ring);
1216 return ret; 1236 return ret;
1217} 1237}
1218 1238
1219void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) 1239void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1220{ 1240{
1221 struct drm_i915_private *dev_priv; 1241 struct drm_i915_private *dev_priv;
@@ -1223,27 +1243,31 @@ void intel_cleanup_ring_buffer(struct in
1223 1243
1224 if (ring->obj == NULL) 1244 if (ring->obj == NULL)
1225 return; 1245 return;
1226 1246
1227 /* Disable the ring buffer. The ring must be idle at this point */ 1247 /* Disable the ring buffer. The ring must be idle at this point */
1228 dev_priv = ring->dev->dev_private; 1248 dev_priv = ring->dev->dev_private;
1229 ret = intel_ring_idle(ring); 1249 ret = intel_ring_idle(ring);
1230 if (ret) 1250 if (ret)
1231 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 1251 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1232 ring->name, ret); 1252 ring->name, ret);
1233 1253
1234 I915_WRITE_CTL(ring, 0); 1254 I915_WRITE_CTL(ring, 0);
1235 1255
 1256#ifdef __NetBSD__
 1257 drm_iounmap(dev, &ring->virtual_start_map);
 1258#else
1236 iounmap(ring->virtual_start); 1259 iounmap(ring->virtual_start);
 1260#endif
1237 1261
1238 i915_gem_object_unpin(ring->obj); 1262 i915_gem_object_unpin(ring->obj);
1239 drm_gem_object_unreference(&ring->obj->base); 1263 drm_gem_object_unreference(&ring->obj->base);
1240 ring->obj = NULL; 1264 ring->obj = NULL;
1241 1265
1242 if (ring->cleanup) 1266 if (ring->cleanup)
1243 ring->cleanup(ring); 1267 ring->cleanup(ring);
1244 1268
1245 cleanup_status_page(ring); 1269 cleanup_status_page(ring);
1246} 1270}
1247 1271
1248static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) 1272static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1249{ 1273{
@@ -1712,32 +1736,48 @@ int intel_render_ring_init_dri(struct dr
1712 ring->dispatch_execbuffer = i915_dispatch_execbuffer; 1736 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1713 ring->init = init_render_ring; 1737 ring->init = init_render_ring;
1714 ring->cleanup = render_ring_cleanup; 1738 ring->cleanup = render_ring_cleanup;
1715 1739
1716 ring->dev = dev; 1740 ring->dev = dev;
1717 INIT_LIST_HEAD(&ring->active_list); 1741 INIT_LIST_HEAD(&ring->active_list);
1718 INIT_LIST_HEAD(&ring->request_list); 1742 INIT_LIST_HEAD(&ring->request_list);
1719 1743
1720 ring->size = size; 1744 ring->size = size;
1721 ring->effective_size = ring->size; 1745 ring->effective_size = ring->size;
1722 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 1746 if (IS_I830(ring->dev) || IS_845G(ring->dev))
1723 ring->effective_size -= 128; 1747 ring->effective_size -= 128;
1724 1748
 1749#ifdef __NetBSD__
 1750 ring->virtual_start_map.offset = start;
 1751 ring->virtual_start_map.size = size;
 1752 ring->virtual_start_map.type = _DRM_REGISTERS;
 1753 ring->virtual_start_map.flags = 0;
 1754 ring->virtual_start_map.flags |= _DRM_RESTRICTED;
 1755 ring->virtual_start_map.flags |= _DRM_KERNEL;
 1756 ring->virtual_start_map.flags |= _DRM_WRITE_COMBINING;
 1757 ring->virtual_start_map.flags |= _DRM_DRIVER;
 1758 ret = drm_ioremap(dev, &ring->virtual_start_map);
 1759 if (ret) {
 1760 DRM_ERROR("cannot ioremap virtual address for ring buffer\n");
 1761 return ret;
 1762 }
 1763#else
1725 ring->virtual_start = ioremap_wc(start, size); 1764 ring->virtual_start = ioremap_wc(start, size);
1726 if (ring->virtual_start == NULL) { 1765 if (ring->virtual_start == NULL) {
1727 DRM_ERROR("can not ioremap virtual address for" 1766 DRM_ERROR("can not ioremap virtual address for"
1728 " ring buffer\n"); 1767 " ring buffer\n");
1729 return -ENOMEM; 1768 return -ENOMEM;
1730 } 1769 }
 1770#endif
1731 1771
1732 if (!I915_NEED_GFX_HWS(dev)) { 1772 if (!I915_NEED_GFX_HWS(dev)) {
1733 ret = init_phys_hws_pga(ring); 1773 ret = init_phys_hws_pga(ring);
1734 if (ret) 1774 if (ret)
1735 return ret; 1775 return ret;
1736 } 1776 }
1737 1777
1738 return 0; 1778 return 0;
1739} 1779}
1740 1780
1741int intel_init_bsd_ring_buffer(struct drm_device *dev) 1781int intel_init_bsd_ring_buffer(struct drm_device *dev)
1742{ 1782{
1743 drm_i915_private_t *dev_priv = dev->dev_private; 1783 drm_i915_private_t *dev_priv = dev->dev_private;

cvs diff -r1.1.1.1.2.3 -r1.1.1.1.2.4 src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.h (expand / switch to unified diff)

--- src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.h 2013/07/24 02:59:29 1.1.1.1.2.3
+++ src/sys/external/bsd/drm2/dist/drm/i915/Attic/intel_ringbuffer.h 2013/07/24 02:59:46 1.1.1.1.2.4
@@ -38,27 +38,31 @@ struct intel_hw_status_page {
38#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) 38#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
39#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) 39#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
40#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) 40#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
41 41
42struct intel_ring_buffer { 42struct intel_ring_buffer {
43 const char *name; 43 const char *name;
44 enum intel_ring_id { 44 enum intel_ring_id {
45 RCS = 0x0, 45 RCS = 0x0,
46 VCS, 46 VCS,
47 BCS, 47 BCS,
48 } id; 48 } id;
49#define I915_NUM_RINGS 3 49#define I915_NUM_RINGS 3
50 u32 mmio_base; 50 u32 mmio_base;
 51#ifdef __NetBSD__
 52 struct drm_local_map virtual_start_map;
 53#else
51 void __iomem *virtual_start; 54 void __iomem *virtual_start;
 55#endif
52 struct drm_device *dev; 56 struct drm_device *dev;
53 struct drm_i915_gem_object *obj; 57 struct drm_i915_gem_object *obj;
54 58
55 u32 head; 59 u32 head;
56 u32 tail; 60 u32 tail;
57 int space; 61 int space;
58 int size; 62 int size;
59 int effective_size; 63 int effective_size;
60 struct intel_hw_status_page status_page; 64 struct intel_hw_status_page status_page;
61 65
62 /** We track the position of the requests in the ring buffer, and 66 /** We track the position of the requests in the ring buffer, and
63 * when each is retired we increment last_retired_head as the GPU 67 * when each is retired we increment last_retired_head as the GPU
64 * must have finished processing the request and so we know we 68 * must have finished processing the request and so we know we
@@ -195,27 +199,31 @@ intel_read_status_page(struct intel_ring
195 * 199 *
196 * The area from dword 0x20 to 0x3ff is available for driver usage. 200 * The area from dword 0x20 to 0x3ff is available for driver usage.
197 */ 201 */
198#define I915_GEM_HWS_INDEX 0x20 202#define I915_GEM_HWS_INDEX 0x20
199#define I915_GEM_HWS_SCRATCH_INDEX 0x30 203#define I915_GEM_HWS_SCRATCH_INDEX 0x30
200#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 204#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
201 205
202void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 206void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
203 207
204int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 208int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
205static inline void intel_ring_emit(struct intel_ring_buffer *ring, 209static inline void intel_ring_emit(struct intel_ring_buffer *ring,
206 u32 data) 210 u32 data)
207{ 211{
 212#ifdef __NetBSD__
 213 DRM_WRITE32(&ring->virtual_start_map, ring->tail, data);
 214#else
208 iowrite32(data, ring->virtual_start + ring->tail); 215 iowrite32(data, ring->virtual_start + ring->tail);
 216#endif
209 ring->tail += 4; 217 ring->tail += 4;
210} 218}
211void intel_ring_advance(struct intel_ring_buffer *ring); 219void intel_ring_advance(struct intel_ring_buffer *ring);
212int __must_check intel_ring_idle(struct intel_ring_buffer *ring); 220int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
213 221
214int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 222int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
215int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); 223int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
216 224
217int intel_init_render_ring_buffer(struct drm_device *dev); 225int intel_init_render_ring_buffer(struct drm_device *dev);
218int intel_init_bsd_ring_buffer(struct drm_device *dev); 226int intel_init_bsd_ring_buffer(struct drm_device *dev);
219int intel_init_blt_ring_buffer(struct drm_device *dev); 227int intel_init_blt_ring_buffer(struct drm_device *dev);
220 228
221u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 229u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);