Sun Dec 19 01:43:37 2021 UTC
bus-spaceify raw_reg_read/write.

XXX heavily modified - maya


(riastradh)
diff -r1.20 -r1.21 src/sys/external/bsd/drm2/dist/drm/i915/i915_irq.c
diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/i915/intel_uncore.h
diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gt_irq.c
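
The change converts the raw MMIO helpers from the Linux readl/writel style, which
dereferences the linear __iomem pointer in uncore->regs, to NetBSD bus_space(9)
accessors driven by the tag and handle kept with the uncore; call sites therefore
pass the intel_uncore itself instead of uncore->regs.  A minimal before/after
sketch of the call-site pattern (a fragment for illustration only; the variables
are the ones appearing in the diffs below):

	/* Before: read/write through the linear __iomem mapping. */
	void __iomem * const regs = gt->uncore->regs;
	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);

	/* After: pass the uncore; on NetBSD the macros expand to
	 * bus_space_read_4()/bus_space_write_4() on regs_bst/regs_bsh. */
	intr_dw = raw_reg_read(gt->uncore, GEN11_GT_INTR_DW(bank));
	raw_reg_write(gt->uncore, GEN11_GT_INTR_DW(bank), intr_dw);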

cvs diff -r1.20 -r1.21 src/sys/external/bsd/drm2/dist/drm/i915/i915_irq.c

--- src/sys/external/bsd/drm2/dist/drm/i915/i915_irq.c 2021/12/18 23:45:28 1.20
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_irq.c 2021/12/19 01:43:37 1.21
@@ -1,14 +1,14 @@
-/* $NetBSD: i915_irq.c,v 1.20 2021/12/18 23:45:28 riastradh Exp $ */
+/* $NetBSD: i915_irq.c,v 1.21 2021/12/19 01:43:37 riastradh Exp $ */
 
 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
  */
 /*
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * without limitation the rights to use, copy, modify, merge, publish,
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
@@ -19,27 +19,27 @@
  * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_irq.c,v 1.20 2021/12/18 23:45:28 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_irq.c,v 1.21 2021/12/19 01:43:37 riastradh Exp $");
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/circ_buf.h>
 #include <linux/slab.h>
 #include <linux/sysrq.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_irq.h>
 #include <drm/i915_drm.h>
 
 #include "display/intel_display_types.h"
 #include "display/intel_fifo_underrun.h"
@@ -2401,68 +2401,66 @@ static inline u32 gen8_master_intr_disab
 	 * and will generate new interrupt after enabling master.
 	 */
 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
 }
 
 static inline void gen8_master_intr_enable(void __iomem * const regs)
 {
 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 }
 
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
 	struct drm_i915_private *dev_priv = arg;
-	void __iomem * const regs = dev_priv->uncore.regs;
 	u32 master_ctl;
 	u32 gt_iir[4];
 
 	if (!intel_irqs_enabled(dev_priv))
 		return IRQ_NONE;
 
-	master_ctl = gen8_master_intr_disable(regs);
+	master_ctl = gen8_master_intr_disable(&dev_priv->uncore);
 	if (!master_ctl) {
-		gen8_master_intr_enable(regs);
+		gen8_master_intr_enable(&dev_priv->uncore);
 		return IRQ_NONE;
 	}
 
 	/* Find, clear, then process each source of interrupt */
 	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
 
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	if (master_ctl & ~GEN8_GT_IRQS) {
 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 		gen8_de_irq_handler(dev_priv, master_ctl);
 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 	}
 
-	gen8_master_intr_enable(regs);
+	gen8_master_intr_enable(&dev_priv->uncore);
 
 	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
 
 	return IRQ_HANDLED;
 }
 
 static u32
 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
 {
-	void __iomem * const regs = gt->uncore->regs;
 	u32 iir;
 
 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
 		return 0;
 
-	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+	iir = raw_reg_read(gt->uncore, GEN11_GU_MISC_IIR);
 	if (likely(iir))
-		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
+		raw_reg_write(gt->uncore, GEN11_GU_MISC_IIR, iir);
 
 	return iir;
 }
 
 static void
 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
 {
 	if (iir & GEN11_GU_MISC_GSE)
 		intel_opregion_asle_intr(gt->i915);
 }
 
 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
 {
@@ -2475,71 +2473,69 @@ static inline u32 gen11_master_intr_disa
 	 * and will generate new interrupt after enabling master.
 	 */
 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
 }
 
 static inline void gen11_master_intr_enable(void __iomem * const regs)
 {
 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
 }
 
 static void
 gen11_display_irq_handler(struct drm_i915_private *i915)
 {
-	void __iomem * const regs = i915->uncore.regs;
-	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
+	const u32 disp_ctl = raw_reg_read(&i915->uncore, GEN11_DISPLAY_INT_CTL);
 
 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
 	/*
 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
 	 * for the display related bits.
 	 */
-	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
+	raw_reg_write(&i915->uncore, GEN11_DISPLAY_INT_CTL, 0x0);
 	gen8_de_irq_handler(i915, disp_ctl);
-	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
+	raw_reg_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
 		      GEN11_DISPLAY_IRQ_ENABLE);
 
 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
 }
 
 static __always_inline irqreturn_t
 __gen11_irq_handler(struct drm_i915_private * const i915,
 		    u32 (*intr_disable)(void __iomem * const regs),
 		    void (*intr_enable)(void __iomem * const regs))
 {
-	void __iomem * const regs = i915->uncore.regs;
 	struct intel_gt *gt = &i915->gt;
 	u32 master_ctl;
 	u32 gu_misc_iir;
 
 	if (!intel_irqs_enabled(i915))
 		return IRQ_NONE;
 
-	master_ctl = intr_disable(regs);
+	master_ctl = intr_disable(&i915->uncore);
 	if (!master_ctl) {
-		intr_enable(regs);
+		intr_enable(&i915->uncore);
 		return IRQ_NONE;
 	}
 
 	/* Find, clear, then process each source of interrupt. */
 	gen11_gt_irq_handler(gt, master_ctl);
 
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	if (master_ctl & GEN11_DISPLAY_IRQ)
 		gen11_display_irq_handler(i915);
 
 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
 
-	intr_enable(regs);
+	intr_enable(&i915->uncore);
 
 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
 
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
 	return __gen11_irq_handler(arg,
 				   gen11_master_intr_disable,
 				   gen11_master_intr_enable);
 }
 

cvs diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/i915/intel_uncore.h

--- src/sys/external/bsd/drm2/dist/drm/i915/intel_uncore.h 2021/12/18 23:45:29 1.2
+++ src/sys/external/bsd/drm2/dist/drm/i915/intel_uncore.h 2021/12/19 01:43:37 1.3
@@ -1,14 +1,14 @@
-/* $NetBSD: intel_uncore.h,v 1.2 2021/12/18 23:45:29 riastradh Exp $ */
+/* $NetBSD: intel_uncore.h,v 1.3 2021/12/19 01:43:37 riastradh Exp $ */
 
 /*
  * Copyright © 2017 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
@@ -401,19 +401,28 @@ static inline void intel_uncore_rmw_fw(s
 
 static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
 						i915_reg_t reg, u32 val,
 						u32 mask, u32 expected_val)
 {
 	u32 reg_val;
 
 	intel_uncore_write(uncore, reg, val);
 	reg_val = intel_uncore_read(uncore, reg);
 
 	return (reg_val & mask) != expected_val ? -EINVAL : 0;
 }
 
+#ifdef __NetBSD__
+#define raw_reg_read(i915, reg) \
+	bus_space_read_4((i915)->regs_bst, (i915)->regs_bsh, \
+	    i915_mmio_reg_offset(reg))
+#define raw_reg_write(i915, reg, value) \
+	bus_space_write_4((i915)->regs_bst, (i915)->regs_bsh, \
+	    i915_mmio_reg_offset(reg), (value))
+#else
 #define raw_reg_read(base, reg) \
 	readl(base + i915_mmio_reg_offset(reg))
 #define raw_reg_write(base, reg, value) \
 	writel(value, base + i915_mmio_reg_offset(reg))
+#endif
 
 #endif /* !__INTEL_UNCORE_H__ */
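
On NetBSD the new macros go through the bus space tag/handle pair (regs_bst/regs_bsh)
stored in the object passed as the first argument; although the parameter is named
i915, the callers in the other two files pass a struct intel_uncore *.  A hypothetical
sketch of the attach-time setup these macros assume -- the pci_mapreg_map() call, the
BAR number, and the surrounding variables are illustrative, not taken from the tree:

	/* Hypothetical: map the MMIO BAR once and remember the bus space
	 * tag and handle so raw_reg_read()/raw_reg_write() can use
	 * bus_space_read_4()/bus_space_write_4() instead of readl()/writel(). */
	if (pci_mapreg_map(pa, PCI_BAR(0), PCI_MAPREG_TYPE_MEM, 0,
		&uncore->regs_bst, &uncore->regs_bsh, NULL, NULL) != 0)
		return ENXIO;

	/* Equivalent of readl(regs + off) / writel(val, regs + off): */
	val = bus_space_read_4(uncore->regs_bst, uncore->regs_bsh, off);
	bus_space_write_4(uncore->regs_bst, uncore->regs_bsh, off, val);

Either way, i915_mmio_reg_offset(reg) supplies the byte offset, so the NetBSD and
Linux variants of the macros stay call-compatible.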

cvs diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gt_irq.c

--- src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gt_irq.c 2021/12/18 23:45:30 1.2
+++ src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gt_irq.c 2021/12/19 01:43:37 1.3
@@ -1,23 +1,23 @@
-/* $NetBSD: intel_gt_irq.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $ */
+/* $NetBSD: intel_gt_irq.c,v 1.3 2021/12/19 01:43:37 riastradh Exp $ */
 
 /*
  * SPDX-License-Identifier: MIT
  *
  * Copyright © 2019 Intel Corporation
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: intel_gt_irq.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: intel_gt_irq.c,v 1.3 2021/12/19 01:43:37 riastradh Exp $");
 
 #include <linux/sched/clock.h>
 
 #include "i915_drv.h"
 #include "i915_irq.h"
 #include "intel_gt.h"
 #include "intel_gt_irq.h"
 #include "intel_uncore.h"
 #include "intel_rps.h"
 
 static void guc_irq_handler(struct intel_guc *guc, u16 iir)
 {
 	if (iir & GUC_INTR_GUC2HOST)
@@ -35,51 +35,50 @@ cs_irq_handler(struct intel_engine_cs *e
 	if (iir & GT_RENDER_USER_INTERRUPT) {
 		intel_engine_signal_breadcrumbs(engine);
 		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
 	}
 
 	if (tasklet)
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
 static u32
 gen11_gt_engine_identity(struct intel_gt *gt,
 			 const unsigned int bank, const unsigned int bit)
 {
-	void __iomem * const regs = gt->uncore->regs;
 	u32 timeout_ts;
 	u32 ident;
 
 	lockdep_assert_held(&gt->irq_lock);
 
-	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
+	raw_reg_write(gt->uncore, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
 
 	/*
 	 * NB: Specs do not specify how long to spin wait,
 	 * so we do ~100us as an educated guess.
 	 */
 	timeout_ts = (local_clock() >> 10) + 100;
 	do {
-		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
+		ident = raw_reg_read(gt->uncore, GEN11_INTR_IDENTITY_REG(bank));
 	} while (!(ident & GEN11_INTR_DATA_VALID) &&
 		 !time_after32(local_clock() >> 10, timeout_ts));
 
 	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
 		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
 			  bank, bit, ident);
 		return 0;
 	}
 
-	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
+	raw_reg_write(gt->uncore, GEN11_INTR_IDENTITY_REG(bank),
 		      GEN11_INTR_DATA_VALID);
 
 	return ident;
 }
 
 static void
 gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
 			const u16 iir)
 {
 	if (instance == OTHER_GUC_INSTANCE)
 		return guc_irq_handler(&gt->uc.guc, iir);
 
 	if (instance == OTHER_GTPM_INSTANCE)
@@ -120,81 +119,79 @@ gen11_gt_identity_handler(struct intel_g
 	if (class <= COPY_ENGINE_CLASS)
 		return gen11_engine_irq_handler(gt, class, instance, intr);
 
 	if (class == OTHER_CLASS)
 		return gen11_other_irq_handler(gt, instance, intr);
 
 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
 		  class, instance, intr);
 }
 
 static void
 gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
 {
-	void __iomem * const regs = gt->uncore->regs;
 	unsigned long intr_dw;
 	unsigned int bit;
 
 	lockdep_assert_held(&gt->irq_lock);
 
-	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+	intr_dw = raw_reg_read(gt->uncore, GEN11_GT_INTR_DW(bank));
 
 	for_each_set_bit(bit, &intr_dw, 32) {
 		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
 
 		gen11_gt_identity_handler(gt, ident);
 	}
 
 	/* Clear must be after shared has been served for engine */
-	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+	raw_reg_write(gt->uncore, GEN11_GT_INTR_DW(bank), intr_dw);
 }
 
 void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
 {
 	unsigned int bank;
 
 	spin_lock(&gt->irq_lock);
 
 	for (bank = 0; bank < 2; bank++) {
 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
 			gen11_gt_bank_handler(gt, bank);
 	}
 
 	spin_unlock(&gt->irq_lock);
 }
 
 bool gen11_gt_reset_one_iir(struct intel_gt *gt,
 			    const unsigned int bank, const unsigned int bit)
 {
-	void __iomem * const regs = gt->uncore->regs;
 	u32 dw;
 
 	lockdep_assert_held(&gt->irq_lock);
 
-	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+	dw = raw_reg_read(gt->uncore, GEN11_GT_INTR_DW(bank));
 	if (dw & BIT(bit)) {
 		/*
 		 * According to the BSpec, DW_IIR bits cannot be cleared without
 		 * first servicing the Selector & Shared IIR registers.
 		 */
 		gen11_gt_engine_identity(gt, bank, bit);
 
 		/*
 		 * We locked GT INT DW by reading it. If we want to (try
 		 * to) recover from this successfully, we need to clear
 		 * our bit, otherwise we are locking the register for
 		 * everybody.
 		 */
-		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+		raw_reg_write(gt->uncore, GEN11_GT_INTR_DW(bank), BIT(bit));
 
 		return true;
 	}
 
 	return false;
 }
 
 void gen11_gt_irq_reset(struct intel_gt *gt)
 {
 	struct intel_uncore *uncore = gt->uncore;
 
 	/* Disable RCS, BCS, VCS and VECS class engines. */
 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
@@ -283,50 +280,49 @@ void gen6_gt_irq_handler(struct intel_gt
 		intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
 
 	if (gt_iir & GT_PARITY_ERROR(gt->i915))
 		gen7_parity_error_irq_handler(gt, gt_iir);
 }
 
 void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
 {
-	void __iomem * const regs = gt->uncore->regs;
 
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
+		gt_iir[0] = raw_reg_read(gt->uncore, GEN8_GT_IIR(0));
 		if (likely(gt_iir[0]))
-			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
+			raw_reg_write(gt->uncore, GEN8_GT_IIR(0), gt_iir[0]);
 	}
 
 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
-		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
+		gt_iir[1] = raw_reg_read(gt->uncore, GEN8_GT_IIR(1));
 		if (likely(gt_iir[1]))
-			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
+			raw_reg_write(gt->uncore, GEN8_GT_IIR(1), gt_iir[1]);
 	}
 
 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
-		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
+		gt_iir[2] = raw_reg_read(gt->uncore, GEN8_GT_IIR(2));
 		if (likely(gt_iir[2]))
-			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
+			raw_reg_write(gt->uncore, GEN8_GT_IIR(2), gt_iir[2]);
 	}
 
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
-		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
+		gt_iir[3] = raw_reg_read(gt->uncore, GEN8_GT_IIR(3));
 		if (likely(gt_iir[3]))
-			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
+			raw_reg_write(gt->uncore, GEN8_GT_IIR(3), gt_iir[3]);
 	}
 }
 
 void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
 {
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
 		cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
 			       gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
 		cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
 			       gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
 	}
 
 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {