Diffstat (limited to 'pkgs/patches-linux-5.15/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch')
-rw-r--r-- pkgs/patches-linux-5.15/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch | 97
1 file changed, 97 insertions(+), 0 deletions(-)
diff --git a/pkgs/patches-linux-5.15/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch b/pkgs/patches-linux-5.15/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch
new file mode 100644
index 0000000..73563a5
--- /dev/null
+++ b/pkgs/patches-linux-5.15/704-net-next-ethernet-marvell-mvnetaMQPrioQueue.patch
@@ -0,0 +1,97 @@
+From e9f7099d0730341b24c057acbf545dd019581db6 Mon Sep 17 00:00:00 2001
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Fri, 26 Nov 2021 12:20:55 +0100
+Subject: net: mvneta: Allow having more than one queue per TC
+
+The current mqprio implementation assumed that we were only using one
+queue per TC. Use the offset and count parameters to allow using
+multiple queues per TC. In that case, the controller will use a standard
+round-robin algorithm to pick queues assigned to the same TC, with the
+same priority.
+
+This only applies to VLAN priorities in ingress traffic, each TC
+corresponding to a VLAN priority.
+
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 35 ++++++++++++++++++++---------------
+ 1 file changed, 20 insertions(+), 15 deletions(-)
+
+(limited to 'drivers/net/ethernet/marvell/mvneta.c')
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -493,7 +493,6 @@ struct mvneta_port {
+ u8 mcast_count[256];
+ u16 tx_ring_size;
+ u16 rx_ring_size;
+- u8 prio_tc_map[8];
+
+ phy_interface_t phy_interface;
+ struct device_node *dn;
+@@ -4936,13 +4935,12 @@ static void mvneta_clear_rx_prio_map(str
+ mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
+ }
+
+-static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
++static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
+ {
+- u32 val = 0;
+- int i;
++ u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
+
+- for (i = 0; i < rxq_number; i++)
+- val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
++ val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
++ val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
+
+ mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
+ }
+@@ -4951,8 +4949,8 @@ static int mvneta_setup_mqprio(struct ne
+ struct tc_mqprio_qopt_offload *mqprio)
+ {
+ struct mvneta_port *pp = netdev_priv(dev);
++ int rxq, tc;
+ u8 num_tc;
+- int i;
+
+ if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
+ return 0;
+@@ -4962,21 +4960,28 @@ static int mvneta_setup_mqprio(struct ne
+ if (num_tc > rxq_number)
+ return -EINVAL;
+
++ mvneta_clear_rx_prio_map(pp);
++
+ if (!num_tc) {
+- mvneta_clear_rx_prio_map(pp);
+ netdev_reset_tc(dev);
+ return 0;
+ }
+
+- memcpy(pp->prio_tc_map, mqprio->qopt.prio_tc_map,
+- sizeof(pp->prio_tc_map));
++ netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+
+- mvneta_setup_rx_prio_map(pp);
++ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
++ netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
++ mqprio->qopt.offset[tc]);
++
++ for (rxq = mqprio->qopt.offset[tc];
++ rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
++ rxq++) {
++ if (rxq >= rxq_number)
++ return -EINVAL;
+
+- netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+- for (i = 0; i < mqprio->qopt.num_tc; i++)
+- netdev_set_tc_queue(dev, i, mqprio->qopt.count[i],
+- mqprio->qopt.offset[i]);
++ mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
++ }
++ }
+
+ return 0;
+ }
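
For context on what the new loop computes, here is a minimal, self-contained C sketch of the queue-range walk and the read-modify-write register update, runnable in userspace. The 3-bit-per-priority packing (rxq << (prio * 3)) is inferred from the 0x7 clear mask in the patch; the macro body and the names VLAN_PRIO_RXQ_MAP and map_vlan_prio are illustrative assumptions, not the driver's actual definitions.

/*
 * Sketch of the VLAN-priority-to-RXQ mapping introduced above.
 * Assumption: each priority owns a 3-bit field in one 32-bit register,
 * as implied by the 0x7 clear mask in the patch.
 */
#include <stdio.h>
#include <inttypes.h>

#define VLAN_PRIO_RXQ_MAP(prio, rxq) ((uint32_t)(rxq) << ((prio) * 3))

/* Read-modify-write one priority's field, mirroring mvneta_map_vlan_prio_to_rxq(). */
static uint32_t map_vlan_prio(uint32_t reg, unsigned int prio, unsigned int rxq)
{
	reg &= ~VLAN_PRIO_RXQ_MAP(prio, 0x7);	/* clear the old queue for prio */
	reg |= VLAN_PRIO_RXQ_MAP(prio, rxq);	/* install the new queue */
	return reg;
}

int main(void)
{
	/* Hypothetical mqprio offload: TC0 = queues 0-1, TC1 = queues 2-3. */
	const unsigned int offset[] = { 0, 2 };
	const unsigned int count[] = { 2, 2 };
	const unsigned int num_tc = 2, rxq_number = 8;
	uint32_t reg = 0;

	/* Same walk as the new loop in mvneta_setup_mqprio(): each queue in a
	 * TC's [offset, offset + count) range is mapped against that TC's VLAN
	 * priority. In this one-register model a later write overwrites an
	 * earlier one, so a priority's field ends up holding the last queue of
	 * the range; per the commit message, the controller itself round-robins
	 * among the queues assigned to the same TC. */
	for (unsigned int tc = 0; tc < num_tc; tc++)
		for (unsigned int rxq = offset[tc]; rxq < offset[tc] + count[tc]; rxq++) {
			if (rxq >= rxq_number)
				return 1;	/* the driver returns -EINVAL here */
			reg = map_vlan_prio(reg, tc, rxq);
		}

	printf("VLAN_PRIO_TO_RXQ = 0x%08" PRIx32 "\n", reg);	/* 0x00000019 for this config */
	return 0;
}

A configuration like the one in the sketch would be requested from userspace with something along the lines of: tc qdisc replace dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 queues 2@0 2@2 hw 1 (exact syntax varies by tc version), with hardware offload enabled so that mvneta_setup_mqprio() is actually invoked.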