Diffstat (limited to 'pkgs/patches-linux-5.15/0072-irqchip-armada-370-xp-Implement-SoC-Error-interrupts.patch')
-rw-r--r--  pkgs/patches-linux-5.15/0072-irqchip-armada-370-xp-Implement-SoC-Error-interrupts.patch | 348
1 file changed, 348 insertions, 0 deletions
diff --git a/pkgs/patches-linux-5.15/0072-irqchip-armada-370-xp-Implement-SoC-Error-interrupts.patch b/pkgs/patches-linux-5.15/0072-irqchip-armada-370-xp-Implement-SoC-Error-interrupts.patch
new file mode 100644
index 0000000..5bd6786
--- /dev/null
+++ b/pkgs/patches-linux-5.15/0072-irqchip-armada-370-xp-Implement-SoC-Error-interrupts.patch
@@ -0,0 +1,348 @@
+From e4f14222579ceded47baafcbe10fc78f080e538b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali@kernel.org>
+Date: Mon, 18 Apr 2022 00:04:32 +0200
+Subject: [PATCH 72/90] irqchip/armada-370-xp: Implement SoC Error interrupts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+MPIC IRQ 4 is used as SoC Error Summary interrupt and provides access to
+another hierarchy of SoC Error interrupts. Implement a new IRQ chip and
+domain for accessing this IRQ hierarchy.
+
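+A minimal consumer sketch (illustrative only, not part of this change): a
+platform driver whose device tree node references one of the new SoC Error
+interrupts would claim it through the usual request_irq() path, with the
+hwirq number translated by irq_domain_xlate_onecell() in the new domain.
+The device, handler and probe names here are hypothetical:
+
+    #include <linux/interrupt.h>
+    #include <linux/platform_device.h>
+
+    static irqreturn_t soc_err_handler(int irq, void *dev_id)
+    {
+            /* handle the device-specific error condition */
+            return IRQ_HANDLED;
+    }
+
+    static int soc_err_consumer_probe(struct platform_device *pdev)
+    {
+            int irq = platform_get_irq(pdev, 0);
+
+            if (irq < 0)
+                    return irq;
+
+            return devm_request_irq(&pdev->dev, irq, soc_err_handler, 0,
+                                    "soc-err-consumer", pdev);
+    }
+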
+Signed-off-by: Pali Rohár <pali@kernel.org>
+---
+ drivers/irqchip/irq-armada-370-xp.c | 212 +++++++++++++++++++++++++++-
+ 1 file changed, 209 insertions(+), 3 deletions(-)
+
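+Note below the "---" separator (ignored by git am): the new SoC Error mask
+registers are accessed through per_cpu_int_base, i.e. they are banked per
+CPU, which is why .irq_set_affinity first masks the interrupt on every
+online CPU and then unmasks it on exactly one. A compact sketch of the
+register/bit selection performed by the mask/unmask helpers below:
+
+    /* hwirq 0..31 -> SOC_ERR_0 mask, hwirq 32..63 -> SOC_ERR_1 mask */
+    u32 reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
+                          : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
+    u32 bit = BIT(hwirq % 32);  /* e.g. hwirq 37 -> SOC_ERR_1, bit 5 */
+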
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index 8abc70ed30c1..2df9e21e1559 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -117,6 +117,8 @@
+ /* Registers relative to main_int_base */
+ #define ARMADA_370_XP_INT_CONTROL (0x00)
+ #define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x04)
++#define ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS (0x20)
++#define ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS (0x24)
+ #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
+ #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
+ #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
+@@ -130,6 +132,8 @@
+ #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
+ #define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
+ #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
++#define ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF (0x50)
++#define ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF (0x54)
+ #define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54)
+ #define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu)
+
+@@ -153,6 +157,8 @@
+ static void __iomem *per_cpu_int_base;
+ static void __iomem *main_int_base;
+ static struct irq_domain *armada_370_xp_mpic_domain;
++static struct irq_domain *armada_370_xp_soc_err_domain;
++static unsigned int soc_err_irq_num_regs;
+ static u32 doorbell_mask_reg;
+ static int parent_irq;
+ #ifdef CONFIG_PCI_MSI
+@@ -163,6 +169,8 @@ static DEFINE_MUTEX(msi_used_lock);
+ static phys_addr_t msi_doorbell_addr;
+ #endif
+
++static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d);
++
+ static inline bool is_percpu_irq(irq_hw_number_t irq)
+ {
+ if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
+@@ -528,6 +536,27 @@ static void armada_xp_mpic_reenable_percpu(void)
+ armada_370_xp_irq_unmask(data);
+ }
+
++ /* Re-enable per-CPU SoC Error interrupts that were enabled before suspend */
++ for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
++ struct irq_data *data;
++ int virq;
++
++ virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
++ if (virq == 0)
++ continue;
++
++ data = irq_get_irq_data(virq);
++
++ if (!irq_percpu_is_enabled(virq))
++ continue;
++
++ armada_370_xp_soc_err_irq_unmask(data);
++ }
++
++ /* Unmask summary SoC Error Interrupt */
++ if (soc_err_irq_num_regs > 0)
++ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
++
+ /* IPI is used only when we do not have parent irq */
+ if (parent_irq <= 0)
+ ipi_resume();
+@@ -567,8 +596,8 @@ static struct irq_chip armada_370_xp_irq_chip = {
+ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+ {
+- /* IRQs 0 and 1 cannot be mapped, they are handled internally */
+- if (hw <= 1)
++ /* IRQs 0, 1 and 4 cannot be mapped, they are handled internally */
++ if (hw <= 1 || hw == 4)
+ return -EINVAL;
+
+ armada_370_xp_irq_mask(irq_get_irq_data(virq));
+@@ -598,6 +627,98 @@ static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ };
+
++static DEFINE_RAW_SPINLOCK(armada_370_xp_soc_err_lock);
++
++static void armada_370_xp_soc_err_irq_mask(struct irq_data *d)
++{
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
++ u32 reg, mask;
++
++ reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
++ : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
++
++ raw_spin_lock(&armada_370_xp_soc_err_lock);
++ mask = readl(per_cpu_int_base + reg);
++ mask &= ~BIT(hwirq % 32);
++ writel(mask, per_cpu_int_base + reg);
++ raw_spin_unlock(&armada_370_xp_soc_err_lock);
++}
++
++static void armada_370_xp_soc_err_irq_unmask(struct irq_data *d)
++{
++ irq_hw_number_t hwirq = irqd_to_hwirq(d);
++ u32 reg, mask;
++
++ reg = hwirq >= 32 ? ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF
++ : ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF;
++
++ raw_spin_lock(&armada_370_xp_soc_err_lock);
++ mask = readl(per_cpu_int_base + reg);
++ mask |= BIT(hwirq % 32);
++ writel(mask, per_cpu_int_base + reg);
++ raw_spin_unlock(&armada_370_xp_soc_err_lock);
++}
++
++static int armada_370_xp_soc_err_irq_mask_on_cpu(void *par)
++{
++ struct irq_data *d = par;
++ armada_370_xp_soc_err_irq_mask(d);
++ return 0;
++}
++
++static int armada_370_xp_soc_err_irq_unmask_on_cpu(void *par)
++{
++ struct irq_data *d = par;
++ armada_370_xp_soc_err_irq_unmask(d);
++ return 0;
++}
++
++static int armada_xp_soc_err_irq_set_affinity(struct irq_data *d,
++ const struct cpumask *mask,
++ bool force)
++{
++ unsigned int cpu;
++
++ cpus_read_lock();
++
++ /* First disable IRQ on all cores */
++ for_each_online_cpu(cpu)
++ smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_mask_on_cpu, d, true);
++
++ /* Select a single core from the affinity mask which is online */
++ cpu = cpumask_any_and(mask, cpu_online_mask);
++ smp_call_on_cpu(cpu, armada_370_xp_soc_err_irq_unmask_on_cpu, d, true);
++
++ cpus_read_unlock();
++
++ irq_data_update_effective_affinity(d, cpumask_of(cpu));
++
++ return IRQ_SET_MASK_OK;
++}
++
++static struct irq_chip armada_370_xp_soc_err_irq_chip = {
++ .name = "MPIC SOC",
++ .irq_mask = armada_370_xp_soc_err_irq_mask,
++ .irq_unmask = armada_370_xp_soc_err_irq_unmask,
++ .irq_set_affinity = armada_xp_soc_err_irq_set_affinity,
++};
++
++static int armada_370_xp_soc_err_irq_map(struct irq_domain *h,
++ unsigned int virq, irq_hw_number_t hw)
++{
++ armada_370_xp_soc_err_irq_mask(irq_get_irq_data(virq));
++ irq_set_status_flags(virq, IRQ_LEVEL);
++ irq_set_chip_and_handler(virq, &armada_370_xp_soc_err_irq_chip,
++ handle_level_irq);
++ irq_set_probe(virq);
++ return 0;
++}
++
++static const struct irq_domain_ops armada_370_xp_soc_err_irq_ops = {
++ .map = armada_370_xp_soc_err_irq_map,
++ .xlate = irq_domain_xlate_onecell,
++};
++
+ #ifdef CONFIG_PCI_MSI
+ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+ {
+@@ -633,6 +754,32 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+ static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
+ #endif
+
++static void armada_370_xp_handle_soc_err_irq(void)
++{
++ unsigned long status, bit;
++ u32 mask, cause;
++
++ if (soc_err_irq_num_regs < 1)
++ return;
++
++ mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_0_MASK_OFF);
++ cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_0_CAUSE_OFFS);
++ status = cause & mask;
++
++ for_each_set_bit(bit, &status, 32)
++ generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit);
++
++ if (soc_err_irq_num_regs < 2)
++ return;
++
++ mask = readl(per_cpu_int_base + ARMADA_370_XP_INT_SOC_ERR_1_MASK_OFF);
++ cause = readl(main_int_base + ARMADA_370_XP_INT_SOC_ERR_1_CAUSE_OFFS);
++ status = cause & mask;
++
++ for_each_set_bit(bit, &status, 32)
++ generic_handle_domain_irq(armada_370_xp_soc_err_domain, bit + 32);
++}
++
+ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
+ {
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+@@ -658,6 +805,11 @@ static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
+ continue;
+ }
+
++ if (irqn == 4) {
++ armada_370_xp_handle_soc_err_irq();
++ continue;
++ }
++
+ generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
+ }
+
+@@ -677,7 +829,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
+ if (irqnr > 1022)
+ break;
+
+- if (irqnr > 1) {
++ if (irqnr > 1 && irqnr != 4) {
+ handle_domain_irq(armada_370_xp_mpic_domain,
+ irqnr, regs);
+ continue;
+@@ -687,6 +839,10 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
+ if (irqnr == 1)
+ armada_370_xp_handle_msi_irq(regs, false);
+
++ /* SoC Error handling */
++ if (irqnr == 4)
++ armada_370_xp_handle_soc_err_irq();
++
+ #ifdef CONFIG_SMP
+ /* IPI Handling */
+ if (irqnr == 0) {
+@@ -750,6 +906,26 @@ static void armada_370_xp_mpic_resume(void)
+ }
+ }
+
++ /* Re-enable per-CPU SoC Error interrupts */
++ for (irq = 0; irq < soc_err_irq_num_regs * 32; irq++) {
++ struct irq_data *data;
++ int virq;
++
++ virq = irq_linear_revmap(armada_370_xp_soc_err_domain, irq);
++ if (virq == 0)
++ continue;
++
++ data = irq_get_irq_data(virq);
++
++ /*
++ * Re-enable on the current CPU,
++ * armada_xp_mpic_reenable_percpu() will take
++ * care of secondary CPUs when they come up.
++ */
++ if (irq_percpu_is_enabled(virq))
++ armada_370_xp_soc_err_irq_unmask(data);
++ }
++
+ /* Reconfigure doorbells for IPIs and MSIs */
+ writel(doorbell_mask_reg,
+ per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+@@ -768,6 +944,10 @@ static void armada_370_xp_mpic_resume(void)
+ writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ }
+
++ /* Unmask summary SoC Error Interrupt */
++ if (soc_err_irq_num_regs > 0)
++ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
++
+ /* IPI is used only when we do not have parent irq */
+ if (parent_irq <= 0)
+ ipi_resume();
+@@ -782,6 +962,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+ struct device_node *parent)
+ {
+ struct resource main_int_res, per_cpu_int_res;
++ struct device_node *soc_err_node;
+ int nr_irqs, i;
+ u32 control;
+
+@@ -815,6 +996,27 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+ BUG_ON(!armada_370_xp_mpic_domain);
+ irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
+
++ soc_err_node = of_get_next_child(node, NULL);
++ if (!soc_err_node) {
++ pr_warn("Missing SoC Error Interrupt Controller node\n");
++ pr_warn("Extended interrupts are not supported\n");
++ } else {
++ pr_info("Registering MPIC SoC Error Interrupt Controller\n");
++ /*
++ * Armada 370 and XP have only 32 SoC Error IRQs in one register
++ * and other Armada platforms have 64 IRQs in two registers.
++ */
++ soc_err_irq_num_regs =
++ of_machine_is_compatible("marvell,armada-370-xp") ? 1 : 2;
++ armada_370_xp_soc_err_domain =
++ irq_domain_add_hierarchy(armada_370_xp_mpic_domain, 0,
++ soc_err_irq_num_regs * 32,
++ soc_err_node,
++ &armada_370_xp_soc_err_irq_ops,
++ NULL);
++ BUG_ON(!armada_370_xp_soc_err_domain);
++ }
++
+ /*
+ * parent_irq is used to distinguish between IPI and non-IPI platforms.
+ * So initialize it before calling any other driver functions.
+@@ -827,6 +1029,10 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+
+ armada_370_xp_msi_init(node, main_int_res.start);
+
++ /* Unmask summary SoC Error Interrupt */
++ if (soc_err_irq_num_regs > 0)
++ writel(4, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
++
+ if (parent_irq <= 0) {
+ irq_set_default_host(armada_370_xp_mpic_domain);
+ set_handle_irq(armada_370_xp_handle_irq);
+--
+2.34.1
+