| @@ -1171,51 +1171,71 @@ static int intel_init_ring_buffer(struct | | | @@ -1171,51 +1171,71 @@ static int intel_init_ring_buffer(struct |
1171 | goto err_hws; | | 1171 | goto err_hws; |
1172 | } | | 1172 | } |
1173 | | | 1173 | |
1174 | ring->obj = obj; | | 1174 | ring->obj = obj; |
1175 | | | 1175 | |
1176 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); | | 1176 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); |
1177 | if (ret) | | 1177 | if (ret) |
1178 | goto err_unref; | | 1178 | goto err_unref; |
1179 | | | 1179 | |
1180 | ret = i915_gem_object_set_to_gtt_domain(obj, true); | | 1180 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
1181 | if (ret) | | 1181 | if (ret) |
1182 | goto err_unpin; | | 1182 | goto err_unpin; |
1183 | | | 1183 | |
| | | 1184 | #ifdef __NetBSD__ |
| | | 1185 | ring->virtual_start_map.offset = (dev_priv->mm.gtt->gma_bus_addr + |
| | | 1186 | obj->gtt_offset); |
| | | 1187 | ring->virtual_start_map.size = ring->size; |
| | | 1188 | ring->virtual_start_map.type = _DRM_REGISTERS; |
| | | 1189 | ring->virtual_start_map.flags = 0; |
| | | 1190 | ring->virtual_start_map.flags |= _DRM_RESTRICTED; |
| | | 1191 | ring->virtual_start_map.flags |= _DRM_KERNEL; |
| | | 1192 | ring->virtual_start_map.flags |= _DRM_WRITE_COMBINING|_DRM_DRIVER; |
| | | 1193 | ret = drm_ioremap(dev, &ring->virtual_start_map); |
| | | 1194 | if (ret) { |
| | | 1195 | DRM_ERROR("failed to map ring buffer\n"); |
| | | 1196 | goto err_unpin; |
| | | 1197 | } |
| | | 1198 | #else |
1184 | ring->virtual_start = | | 1199 | ring->virtual_start = |
1185 | ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, | | 1200 | ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, |
1186 | ring->size); | | 1201 | ring->size); |
1187 | if (ring->virtual_start == NULL) { | | 1202 | if (ring->virtual_start == NULL) { |
1188 | DRM_ERROR("Failed to map ringbuffer.\n"); | | 1203 | DRM_ERROR("Failed to map ringbuffer.\n"); |
1189 | ret = -EINVAL; | | 1204 | ret = -EINVAL; |
1190 | goto err_unpin; | | 1205 | goto err_unpin; |
1191 | } | | 1206 | } |
| | | 1207 | #endif |
1192 | | | 1208 | |
1193 | ret = ring->init(ring); | | 1209 | ret = ring->init(ring); |
1194 | if (ret) | | 1210 | if (ret) |
1195 | goto err_unmap; | | 1211 | goto err_unmap; |
1196 | | | 1212 | |
1197 | /* Workaround an erratum on the i830 which causes a hang if | | 1213 | /* Workaround an erratum on the i830 which causes a hang if |
1198 | * the TAIL pointer points to within the last 2 cachelines | | 1214 | * the TAIL pointer points to within the last 2 cachelines |
1199 | * of the buffer. | | 1215 | * of the buffer. |
1200 | */ | | 1216 | */ |
1201 | ring->effective_size = ring->size; | | 1217 | ring->effective_size = ring->size; |
1202 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) | | 1218 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) |
1203 | ring->effective_size -= 128; | | 1219 | ring->effective_size -= 128; |
1204 | | | 1220 | |
1205 | return 0; | | 1221 | return 0; |
1206 | | | 1222 | |
1207 | err_unmap: | | 1223 | err_unmap: |
| | | 1224 | #ifdef __NetBSD__ |
| | | 1225 | drm_iounmap(dev, &ring->virtual_start_map); |
| | | 1226 | #else |
1208 | iounmap(ring->virtual_start); | | 1227 | iounmap(ring->virtual_start); |
| | | 1228 | #endif |
1209 | err_unpin: | | 1229 | err_unpin: |
1210 | i915_gem_object_unpin(obj); | | 1230 | i915_gem_object_unpin(obj); |
1211 | err_unref: | | 1231 | err_unref: |
1212 | drm_gem_object_unreference(&obj->base); | | 1232 | drm_gem_object_unreference(&obj->base); |
1213 | ring->obj = NULL; | | 1233 | ring->obj = NULL; |
1214 | err_hws: | | 1234 | err_hws: |
1215 | cleanup_status_page(ring); | | 1235 | cleanup_status_page(ring); |
1216 | return ret; | | 1236 | return ret; |
1217 | } | | 1237 | } |
1218 | | | 1238 | |
1219 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | | 1239 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) |
1220 | { | | 1240 | { |
1221 | struct drm_i915_private *dev_priv; | | 1241 | struct drm_i915_private *dev_priv; |
| @@ -1223,27 +1243,31 @@ void intel_cleanup_ring_buffer(struct in | | | @@ -1223,27 +1243,31 @@ void intel_cleanup_ring_buffer(struct in |
1223 | | | 1243 | |
1224 | if (ring->obj == NULL) | | 1244 | if (ring->obj == NULL) |
1225 | return; | | 1245 | return; |
1226 | | | 1246 | |
1227 | /* Disable the ring buffer. The ring must be idle at this point */ | | 1247 | /* Disable the ring buffer. The ring must be idle at this point */ |
1228 | dev_priv = ring->dev->dev_private; | | 1248 | dev_priv = ring->dev->dev_private; |
1229 | ret = intel_ring_idle(ring); | | 1249 | ret = intel_ring_idle(ring); |
1230 | if (ret) | | 1250 | if (ret) |
1231 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | | 1251 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", |
1232 | ring->name, ret); | | 1252 | ring->name, ret); |
1233 | | | 1253 | |
1234 | I915_WRITE_CTL(ring, 0); | | 1254 | I915_WRITE_CTL(ring, 0); |
1235 | | | 1255 | |
| | | 1256 | #ifdef __NetBSD__ |
| | | 1257 | drm_iounmap(ring->dev, &ring->virtual_start_map); |
| | | 1258 | #else |
1236 | iounmap(ring->virtual_start); | | 1259 | iounmap(ring->virtual_start); |
| | | 1260 | #endif |
1237 | | | 1261 | |
1238 | i915_gem_object_unpin(ring->obj); | | 1262 | i915_gem_object_unpin(ring->obj); |
1239 | drm_gem_object_unreference(&ring->obj->base); | | 1263 | drm_gem_object_unreference(&ring->obj->base); |
1240 | ring->obj = NULL; | | 1264 | ring->obj = NULL; |
1241 | | | 1265 | |
1242 | if (ring->cleanup) | | 1266 | if (ring->cleanup) |
1243 | ring->cleanup(ring); | | 1267 | ring->cleanup(ring); |
1244 | | | 1268 | |
1245 | cleanup_status_page(ring); | | 1269 | cleanup_status_page(ring); |
1246 | } | | 1270 | } |
1247 | | | 1271 | |
1248 | static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) | | 1272 | static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) |
1249 | { | | 1273 | { |
| @@ -1712,32 +1736,48 @@ int intel_render_ring_init_dri(struct dr | | | @@ -1712,32 +1736,48 @@ int intel_render_ring_init_dri(struct dr |
1712 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; | | 1736 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; |
1713 | ring->init = init_render_ring; | | 1737 | ring->init = init_render_ring; |
1714 | ring->cleanup = render_ring_cleanup; | | 1738 | ring->cleanup = render_ring_cleanup; |
1715 | | | 1739 | |
1716 | ring->dev = dev; | | 1740 | ring->dev = dev; |
1717 | INIT_LIST_HEAD(&ring->active_list); | | 1741 | INIT_LIST_HEAD(&ring->active_list); |
1718 | INIT_LIST_HEAD(&ring->request_list); | | 1742 | INIT_LIST_HEAD(&ring->request_list); |
1719 | | | 1743 | |
1720 | ring->size = size; | | 1744 | ring->size = size; |
1721 | ring->effective_size = ring->size; | | 1745 | ring->effective_size = ring->size; |
1722 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) | | 1746 | if (IS_I830(ring->dev) || IS_845G(ring->dev)) |
1723 | ring->effective_size -= 128; | | 1747 | ring->effective_size -= 128; |
1724 | | | 1748 | |
| | | 1749 | #ifdef __NetBSD__ |
| | | 1750 | ring->virtual_start_map.offset = start; |
| | | 1751 | ring->virtual_start_map.size = size; |
| | | 1752 | ring->virtual_start_map.type = _DRM_REGISTERS; |
| | | 1753 | ring->virtual_start_map.flags = 0; |
| | | 1754 | ring->virtual_start_map.flags |= _DRM_RESTRICTED; |
| | | 1755 | ring->virtual_start_map.flags |= _DRM_KERNEL; |
| | | 1756 | ring->virtual_start_map.flags |= _DRM_WRITE_COMBINING; |
| | | 1757 | ring->virtual_start_map.flags |= _DRM_DRIVER; |
| | | 1758 | ret = drm_ioremap(dev, &ring->virtual_start_map); |
| | | 1759 | if (ret) { |
| | | 1760 | DRM_ERROR("cannot ioremap virtual address for ring buffer\n"); |
| | | 1761 | return ret; |
| | | 1762 | } |
| | | 1763 | #else |
1725 | ring->virtual_start = ioremap_wc(start, size); | | 1764 | ring->virtual_start = ioremap_wc(start, size); |
1726 | if (ring->virtual_start == NULL) { | | 1765 | if (ring->virtual_start == NULL) { |
1727 | DRM_ERROR("can not ioremap virtual address for" | | 1766 | DRM_ERROR("can not ioremap virtual address for" |
1728 | " ring buffer\n"); | | 1767 | " ring buffer\n"); |
1729 | return -ENOMEM; | | 1768 | return -ENOMEM; |
1730 | } | | 1769 | } |
| | | 1770 | #endif |
1731 | | | 1771 | |
1732 | if (!I915_NEED_GFX_HWS(dev)) { | | 1772 | if (!I915_NEED_GFX_HWS(dev)) { |
1733 | ret = init_phys_hws_pga(ring); | | 1773 | ret = init_phys_hws_pga(ring); |
1734 | if (ret) | | 1774 | if (ret) |
1735 | return ret; | | 1775 | return ret; |
1736 | } | | 1776 | } |
1737 | | | 1777 | |
1738 | return 0; | | 1778 | return 0; |
1739 | } | | 1779 | } |
1740 | | | 1780 | |
1741 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | | 1781 | int intel_init_bsd_ring_buffer(struct drm_device *dev) |
1742 | { | | 1782 | { |
1743 | drm_i915_private_t *dev_priv = dev->dev_private; | | 1783 | drm_i915_private_t *dev_priv = dev->dev_private; |