From a43608fa77213ad5ac5f75994254b9f65d57cfa0 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Wed, 24 Oct 2018 10:27:12 +0200 Subject: [PATCH 01/61] can: raw: check for CAN FD capable netdev in raw_sendmsg() When the socket is CAN FD enabled it can handle CAN FD frame transmissions. Add an additional check in raw_sendmsg() as a CAN2.0 CAN driver (non CAN FD) should never see a CAN FD frame. Due to the commonly used can_dropped_invalid_skb() function the CAN 2.0 driver would drop that CAN FD frame anyway - but with this patch the user gets a proper -EINVAL return code. Signed-off-by: Oliver Hartkopp Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- net/can/raw.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/net/can/raw.c b/net/can/raw.c index 1051eee8258184..3aab7664933fdc 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } else ifindex = ro->ifindex; - if (ro->fd_frames) { + dev = dev_get_by_index(sock_net(sk), ifindex); + if (!dev) + return -ENXIO; + + err = -EINVAL; + if (ro->fd_frames && dev->mtu == CANFD_MTU) { if (unlikely(size != CANFD_MTU && size != CAN_MTU)) - return -EINVAL; + goto put_dev; } else { if (unlikely(size != CAN_MTU)) - return -EINVAL; + goto put_dev; } - dev = dev_get_by_index(sock_net(sk), ifindex); - if (!dev) - return -ENXIO; - skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv), msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) From 95217260649aa504eb5d4a0d50959ca4e67c8f96 Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Mon, 6 Aug 2018 15:14:50 +0200 Subject: [PATCH 02/61] can: kvaser_usb: Fix potential uninitialized variable use If alloc_can_err_skb() fails, cf is never initialized. Move assignment of cf inside check. Reported-by: Dan Carpenter Signed-off-by: Jimmy Assarsson Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index c084bae5ec0a4d..5fc0be56427437 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, new_state : CAN_STATE_ERROR_ACTIVE; can_change_state(netdev, cf, tx_state, rx_state); + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + cf->can_id |= CAN_ERR_RESTARTED; } if (new_state == CAN_STATE_BUS_OFF) { @@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, can_bus_off(netdev); } - - if (priv->can.restart_ms && - old_state >= CAN_STATE_BUS_OFF && - new_state < CAN_STATE_BUS_OFF) - cf->can_id |= CAN_ERR_RESTARTED; } if (!skb) { From e13fb9b37cc00616b90df2d620f30345b5ada6ff Mon Sep 17 00:00:00 2001 From: Jimmy Assarsson Date: Mon, 6 Aug 2018 15:14:49 +0200 Subject: [PATCH 03/61] can: kvaser_usb: Fix accessing freed memory in kvaser_usb_start_xmit() The call to can_put_echo_skb() may result in the skb being freed. The skb is later used in the call to dev->ops->dev_frame_to_cmd(). This is avoided by moving the call to can_put_echo_skb() after dev->ops->dev_frame_to_cmd(). 
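As a purely illustrative userspace sketch (hypothetical helper names, not the driver code), the same ownership hazard and the reordering that fixes it look like this:

  /* queue_for_echo() stands in for can_put_echo_skb(), which may free
   * (or hand off) the buffer it is given; build_device_command() stands
   * in for dev->ops->dev_frame_to_cmd(), which still reads the frame.
   */
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  static void queue_for_echo(char *buf)
  {
          free(buf);              /* may consume and free the buffer */
  }

  static void build_device_command(const char *buf)
  {
          printf("cmd built from: %s\n", buf);
  }

  int main(void)
  {
          char *frame = strdup("frame");

          /* buggy order: queue_for_echo(frame); build_device_command(frame);
           * would read a buffer that may already have been freed
           */

          /* fixed order: read the frame first, hand ownership over last */
          build_device_command(frame);
          queue_for_echo(frame);
          return 0;
  }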
Reported-by: Dan Carpenter Signed-off-by: Jimmy Assarsson Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index b939a4c10b8409..c89c7d4900d750 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, context = &priv->tx_contexts[i]; context->echo_index = i; - can_put_echo_skb(skb, netdev, context->echo_index); ++priv->active_tx_contexts; if (priv->active_tx_contexts >= (int)dev->max_tx_urbs) netif_stop_queue(netdev); @@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, dev_kfree_skb(skb); spin_lock_irqsave(&priv->tx_contexts_lock, flags); - can_free_echo_skb(netdev, context->echo_index); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(netdev); @@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, context->priv = priv; + can_put_echo_skb(skb, netdev, context->echo_index); + usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), From 207681fc5f3d5d398f106d1ae0080fc2373f707a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 29 Aug 2018 01:46:54 +0000 Subject: [PATCH 04/61] can: ucan: remove set but not used variable 'udev' Fixes gcc '-Wunused-but-set-variable' warning: drivers/net/can/usb/ucan.c: In function 'ucan_disconnect': drivers/net/can/usb/ucan.c:1578:21: warning: variable 'udev' set but not used [-Wunused-but-set-variable] struct usb_device *udev; Signed-off-by: YueHaibing Reviewed-by: Martin Elshuber Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/ucan.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 0678a38b1af458..c9fd83e8d9477d 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -1575,11 +1575,8 @@ static int ucan_probe(struct usb_interface *intf, /* disconnect the device */ static void ucan_disconnect(struct usb_interface *intf) { - struct usb_device *udev; struct ucan_priv *up = usb_get_intfdata(intf); - udev = interface_to_usbdev(intf); - usb_set_intfdata(intf, NULL); if (up) { From ff1f19d56c200b35eb07cfa6668aa6dcac198cec Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 29 Aug 2018 01:25:45 +0000 Subject: [PATCH 05/61] can: ucan: remove duplicated include from ucan.c Remove duplicated include. Signed-off-by: YueHaibing Reviewed-by: Martin Elshuber Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/ucan.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index c9fd83e8d9477d..f3d5bda012a107 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -35,10 +35,6 @@ #include #include -#include -#include -#include - #define UCAN_DRIVER_NAME "ucan" #define UCAN_MAX_RX_URBS 8 /* the CAN controller needs a while to enable/disable the bus */ From 4f145f14f6b98b5aa0dd91bdae518b3f24f74b37 Mon Sep 17 00:00:00 2001 From: Eugeniu Rosca Date: Mon, 20 Aug 2018 16:49:10 +0200 Subject: [PATCH 06/61] dt-bindings: can: rcar_can: document r8a77965 support Document the support for rcar_can on R8A77965 SoC devices. 
Add R8A77965 to the list of SoCs which require the "assigned-clocks" and "assigned-clock-rates" properties (thanks, Sergei). Signed-off-by: Eugeniu Rosca Reviewed-by: Simon Horman Reviewed-by: Kieran Bingham Reviewed-by: Rob Herring Signed-off-by: Marc Kleine-Budde --- Documentation/devicetree/bindings/net/can/rcar_can.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt index cc4372842bf376..47fc68148f38e6 100644 --- a/Documentation/devicetree/bindings/net/can/rcar_can.txt +++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt @@ -14,6 +14,7 @@ Required properties: "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC. "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC. "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC. + "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC. "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device. "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1 compatible device. @@ -29,11 +30,10 @@ Required properties: - pinctrl-0: pin control group to be used for this controller. - pinctrl-names: must be "default". -Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796" -compatible: -In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock -and can be used by both CAN and CAN FD controller at the same time. It needs to -be scaled to maximum frequency if any of these controllers use it. This is done +Required properties for R8A7795, R8A7796 and R8A77965: +For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can +be used by both CAN and CAN FD controller at the same time. It needs to be +scaled to maximum frequency if any of these controllers use it. This is done using the below properties: - assigned-clocks: phandle of clkp2(CANFD) clock. From 68c8d209cd4337da4fa04c672f0b62bb735969bc Mon Sep 17 00:00:00 2001 From: Fabrizio Castro Date: Mon, 10 Sep 2018 11:43:13 +0100 Subject: [PATCH 07/61] can: rcar_can: Fix erroneous registration Assigning 2 to "renesas,can-clock-select" tricks the driver into registering the CAN interface, even though we don't want that. This patch improves one of the checks to prevent that from happening. 
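A minimal standalone sketch of the new whitelist check (the clock index values are assumed from the binding's valid <0x0>/<0x1>/<0x3> settings; only the hole at 2 must be rejected):

  #include <stdio.h>

  #define BIT(n)  (1U << (n))

  /* assumed index values: 0 = clkp1, 1 = clkp2, 3 = external clock */
  enum { CLKR_CLKP1 = 0, CLKR_CLKP2 = 1, CLKR_CLKEXT = 3 };

  #define RCAR_SUPPORTED_CLOCKS   (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
                                   BIT(CLKR_CLKEXT))

  int main(void)
  {
          for (unsigned int clock_select = 0; clock_select <= 3; clock_select++) {
                  int rejected = !(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS);

                  printf("renesas,can-clock-select = <%u>: %s\n",
                         clock_select, rejected ? "-EINVAL" : "accepted");
          }
          return 0;       /* only <2> is rejected */
  }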
Fixes: 862e2b6af9413b43 ("can: rcar_can: support all input clocks") Signed-off-by: Fabrizio Castro Signed-off-by: Chris Paterson Reviewed-by: Simon Horman Signed-off-by: Marc Kleine-Budde --- drivers/net/can/rcar/rcar_can.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 11662f479e760b..771a4608373978 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -24,6 +24,9 @@ #define RCAR_CAN_DRV_NAME "rcar_can" +#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \ + BIT(CLKR_CLKEXT)) + /* Mailbox configuration: * mailbox 60 - 63 - Rx FIFO mailboxes * mailbox 56 - 59 - Tx FIFO mailboxes @@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev) goto fail_clk; } - if (clock_select >= ARRAY_SIZE(clock_names)) { + if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) { err = -EINVAL; dev_err(&pdev->dev, "invalid CAN clock selected\n"); goto fail_clk; From 868b7c0f43e61f227bf3d7f7d6134bb3c67bb0e8 Mon Sep 17 00:00:00 2001 From: Fabrizio Castro Date: Mon, 10 Sep 2018 11:43:14 +0100 Subject: [PATCH 08/61] dt-bindings: can: rcar_can: Add r8a774a1 support Document RZ/G2M (r8a774a1) SoC specific bindings. Signed-off-by: Fabrizio Castro Signed-off-by: Chris Paterson Reviewed-by: Biju Das Reviewed-by: Rob Herring Reviewed-by: Simon Horman Signed-off-by: Marc Kleine-Budde --- .../devicetree/bindings/net/can/rcar_can.txt | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt index 47fc68148f38e6..9936b9ee67c366 100644 --- a/Documentation/devicetree/bindings/net/can/rcar_can.txt +++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt @@ -5,6 +5,7 @@ Required properties: - compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC. "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC. "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC. + "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC. "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC. "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC. "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC. @@ -18,15 +19,21 @@ Required properties: "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device. "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1 compatible device. - "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device. + "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2 + compatible device. When compatible with the generic version, nodes must list the SoC-specific version corresponding to the platform first followed by the generic version. - reg: physical base address and size of the R-Car CAN register map. - interrupts: interrupt specifier for the sole interrupt. -- clocks: phandles and clock specifiers for 3 CAN clock inputs. -- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk". +- clocks: phandles and clock specifiers for 2 CAN clock inputs for RZ/G2 + devices. + phandles and clock specifiers for 3 CAN clock inputs for every other + SoC. +- clock-names: 2 clock input name strings for RZ/G2: "clkp1", "can_clk". + 3 clock input name strings for every other SoC: "clkp1", "clkp2", + "can_clk". - pinctrl-0: pin control group to be used for this controller. - pinctrl-names: must be "default". 
@@ -42,8 +49,9 @@ using the below properties: Optional properties: - renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are: <0x0> (default) : Peripheral clock (clkp1) - <0x1> : Peripheral clock (clkp2) - <0x3> : Externally input clock + <0x1> : Peripheral clock (clkp2) (not supported by + RZ/G2 devices) + <0x3> : External input clock Example ------- From f164d0204b1156a7e0d8d1622c1a8d25752befec Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 27 Oct 2018 10:36:54 +0200 Subject: [PATCH 09/61] can: hi311x: Use level-triggered interrupt If the hi3110 shares the SPI bus with another traffic-intensive device and packets are received in high volume (by a separate machine sending with "cangen -g 0 -i -x"), reception stops after a few minutes and the counter in /proc/interrupts stops incrementing. Bus state is "active". Bringing the interface down and back up reconvenes the reception. The issue is not observed when the hi3110 is the sole device on the SPI bus. Using a level-triggered interrupt makes the issue go away and lets the hi3110 successfully receive 2 GByte over the course of 5 days while a ks8851 Ethernet chip on the same SPI bus handles 6 GByte of traffic. Unfortunately the hi3110 datasheet is mum on the trigger type. The pin description on page 3 only specifies the polarity (active high): http://www.holtic.com/documents/371-hi-3110_v-rev-kpdf.do Cc: Mathias Duckeck Cc: Akshay Bhat Cc: Casey Fitzpatrick Signed-off-by: Lukas Wunner Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- Documentation/devicetree/bindings/net/can/holt_hi311x.txt | 2 +- drivers/net/can/spi/hi311x.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt index 903a78da65be28..3a9926f9993703 100644 --- a/Documentation/devicetree/bindings/net/can/holt_hi311x.txt +++ b/Documentation/devicetree/bindings/net/can/holt_hi311x.txt @@ -17,7 +17,7 @@ Example: reg = <1>; clocks = <&clk32m>; interrupt-parent = <&gpio4>; - interrupts = <13 IRQ_TYPE_EDGE_RISING>; + interrupts = <13 IRQ_TYPE_LEVEL_HIGH>; vdd-supply = <®5v0>; xceiver-supply = <®5v0>; }; diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 53e320c92a8be2..ddaf46239e39e9 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING; + unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH; int ret; ret = open_candev(net); From 5178b7cd8e42448b1041716f124734eaaa36ca50 Mon Sep 17 00:00:00 2001 From: Pankaj Bansal Date: Wed, 1 Aug 2018 19:36:46 +0530 Subject: [PATCH 10/61] can: flexcan: Unlock the MB unconditionally Unlock the MB irrespective of reception method being FIFO or timestamp based. It is optional but recommended to unlock Mailbox as soon as possible and make it available for reception. 
Reported-by: Alexander Stein Signed-off-by: Pankaj Bansal Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 8e972ef0863769..0431f8d05518ad 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -720,9 +720,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload, priv->write(BIT(n - 32), ®s->iflag2); } else { priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); - priv->read(®s->timer); } + /* Read the Free Running Timer. It is optional but recommended + * to unlock Mailbox as soon as possible and make it available + * for reception. + */ + priv->read(®s->timer); + return 1; } From cbffaf7aa09edbaea2bc7dc440c945297095e2fd Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Thu, 11 Oct 2018 17:01:25 +0200 Subject: [PATCH 11/61] can: flexcan: Always use last mailbox for TX Essentially this patch moves the TX mailbox to position 63, regardless of timestamp based offloading or RX FIFO. So mainly the iflag register usage regarding TX has changed. The rest is consolidating RX FIFO and timestamp offloading as they now use both the same TX mailbox. The reason is a very annoying behavior regarding sending RTR frames when _not_ using RX FIFO: If a TX mailbox sent a RTR frame it becomes a RX mailbox. For that reason flexcan_irq disables the TX mailbox again. But if during the time the RTR was sent and the TX mailbox is disabled a new CAN frames is received, it is lost without notice. The reason is that so-called "Move-in" process starts from the lowest mailbox which happen to be a TX mailbox set to EMPTY. Steps to reproduce (I used an imx7d): 1. generate regular bursts of messages 2. send a RTR from flexcan with higher priority than burst messages every 1ms, e.g. cangen -I 0x100 -L 0 -g 1 -R can0 3. notice a lost message without notification after some seconds When running an iperf in parallel this problem is occurring even more frequently. Using filters is not possible as at least one single CAN-ID is allowed. Handling the TX MB during RX is also not possible as there is no race-free disable of RX MB. There is still a slight window when the described problem can occur. But for that all RX MB must be in use which is essentially next to an overrun. Still there will be no indication if it ever occurs. 
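With the TX mailbox now fixed at index 63, its interrupt flag moves from IFLAG1 into IFLAG2. A small standalone sketch of the bit arithmetic behind the FLEXCAN_IFLAG_MB() macro used in the diff below (illustration only, not driver code):

  #include <stdio.h>

  #define BIT(n)                  (1U << (n))
  #define FLEXCAN_IFLAG_MB(x)     BIT((x) & 0x1f)

  int main(void)
  {
          /* 8: reserved FIFO MB, 9: old TX MB, 63: new TX MB */
          unsigned int mb[] = { 8, 9, 63 };

          for (unsigned int i = 0; i < sizeof(mb) / sizeof(mb[0]); i++)
                  printf("mailbox %2u -> %s, mask 0x%08x\n", mb[i],
                         mb[i] >= 32 ? "iflag2" : "iflag1",
                         FLEXCAN_IFLAG_MB(mb[i]));
          return 0;       /* mailbox 63 -> iflag2, mask 0x80000000 */
  }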
Signed-off-by: Alexander Stein Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 67 +++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 0431f8d05518ad..677c41701cf3c3 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -135,13 +135,12 @@ /* FLEXCAN interrupt flag register (IFLAG) bits */ /* Errata ERR005829 step7: Reserve first valid MB */ -#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 -#define FLEXCAN_TX_MB_OFF_FIFO 9 +#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 -#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 -#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) -#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 -#define FLEXCAN_IFLAG_MB(x) BIT(x) +#define FLEXCAN_TX_MB 63 +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1) +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1) +#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) @@ -737,9 +736,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) struct flexcan_regs __iomem *regs = priv->regs; u32 iflag1, iflag2; - iflag2 = priv->read(®s->iflag2) & priv->reg_imask2_default; - iflag1 = priv->read(®s->iflag1) & priv->reg_imask1_default & + iflag2 = priv->read(®s->iflag2) & priv->reg_imask2_default & ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + iflag1 = priv->read(®s->iflag1) & priv->reg_imask1_default; return (u64)iflag2 << 32 | iflag1; } @@ -751,11 +750,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; irqreturn_t handled = IRQ_NONE; - u32 reg_iflag1, reg_esr; + u32 reg_iflag2, reg_esr; enum can_state last_state = priv->can.state; - reg_iflag1 = priv->read(®s->iflag1); - /* reception interrupt */ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { u64 reg_iflag; @@ -769,6 +766,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) break; } } else { + u32 reg_iflag1; + + reg_iflag1 = priv->read(®s->iflag1); if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { handled = IRQ_HANDLED; can_rx_offload_irq_offload_fifo(&priv->offload); @@ -784,8 +784,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) } } + reg_iflag2 = priv->read(®s->iflag2); + /* transmission complete interrupt */ - if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { + if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { handled = IRQ_HANDLED; stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; @@ -794,7 +796,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) /* after sending a RTR frame MB is in RX mode */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb->can_ctrl); - priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), ®s->iflag1); + priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), ®s->iflag2); netif_wake_queue(dev); } @@ -936,15 +938,13 @@ static int flexcan_chip_start(struct net_device *dev) reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | - FLEXCAN_MCR_IDAM_C; + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + if (priv->devtype_data->quirks & 
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) reg_mcr &= ~FLEXCAN_MCR_FEN; - reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last); - } else { - reg_mcr |= FLEXCAN_MCR_FEN | - FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); - } + else + reg_mcr |= FLEXCAN_MCR_FEN; + netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); priv->write(reg_mcr, ®s->mcr); @@ -987,16 +987,17 @@ static int flexcan_chip_start(struct net_device *dev) priv->write(reg_ctrl2, ®s->ctrl2); } - /* clear and invalidate all mailboxes first */ - for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { - priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, - ®s->mb[i].can_ctrl); - } - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) + for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) { priv->write(FLEXCAN_MB_CODE_RX_EMPTY, ®s->mb[i].can_ctrl); + } + } else { + /* clear and invalidate unused mailboxes first */ + for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) { + priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, + ®s->mb[i].can_ctrl); + } } /* Errata ERR005829: mark first TX mailbox as INACTIVE */ @@ -1360,17 +1361,15 @@ static int flexcan_probe(struct platform_device *pdev) priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP; + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; - } else { - priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; + else priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; - } + priv->tx_mb_idx = FLEXCAN_TX_MB; priv->tx_mb = ®s->mb[priv->tx_mb_idx]; - priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); - priv->reg_imask2_default = 0; + priv->reg_imask1_default = 0; + priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); priv->offload.mailbox_read = flexcan_mailbox_read; From e05237f9da42ee52e73acea0bb082d788e111229 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Fri, 9 Nov 2018 15:01:50 +0100 Subject: [PATCH 12/61] can: flexcan: remove not needed struct flexcan_priv::tx_mb and struct flexcan_priv::tx_mb_idx The previous patch changes the TX path to always use the last mailbox regardless of the used offload scheme (rx-fifo or timestamp based). This means members "tx_mb" and "tx_mb_idx" of the struct flexcan_priv don't depend on the offload scheme, so replace them by compile time constants. 
Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 677c41701cf3c3..68b46395c580c8 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -258,9 +258,7 @@ struct flexcan_priv { struct can_rx_offload offload; struct flexcan_regs __iomem *regs; - struct flexcan_mb __iomem *tx_mb; struct flexcan_mb __iomem *tx_mb_reserved; - u8 tx_mb_idx; u32 reg_ctrl_default; u32 reg_imask1_default; u32 reg_imask2_default; @@ -514,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev, static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct can_frame *cf = (struct can_frame *)skb->data; u32 can_id; u32 data; @@ -536,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de if (cf->can_dlc > 0) { data = be32_to_cpup((__be32 *)&cf->data[0]); - priv->write(data, &priv->tx_mb->data[0]); + priv->write(data, ®s->mb[FLEXCAN_TX_MB].data[0]); } if (cf->can_dlc > 4) { data = be32_to_cpup((__be32 *)&cf->data[4]); - priv->write(data, &priv->tx_mb->data[1]); + priv->write(data, ®s->mb[FLEXCAN_TX_MB].data[1]); } can_put_echo_skb(skb, dev, 0); - priv->write(can_id, &priv->tx_mb->can_id); - priv->write(ctrl, &priv->tx_mb->can_ctrl); + priv->write(can_id, ®s->mb[FLEXCAN_TX_MB].can_id); + priv->write(ctrl, ®s->mb[FLEXCAN_TX_MB].can_ctrl); /* Errata ERR005829 step8: * Write twice INACTIVE(0x8) code to first MB. @@ -737,7 +736,7 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) u32 iflag1, iflag2; iflag2 = priv->read(®s->iflag2) & priv->reg_imask2_default & - ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); iflag1 = priv->read(®s->iflag1) & priv->reg_imask1_default; return (u64)iflag2 << 32 | iflag1; @@ -787,7 +786,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) reg_iflag2 = priv->read(®s->iflag2); /* transmission complete interrupt */ - if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) { + if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) { handled = IRQ_HANDLED; stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; @@ -795,8 +794,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) /* after sending a RTR frame MB is in RX mode */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb->can_ctrl); - priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), ®s->iflag2); + ®s->mb[FLEXCAN_TX_MB].can_ctrl); + priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), ®s->iflag2); netif_wake_queue(dev); } @@ -938,7 +937,7 @@ static int flexcan_chip_start(struct net_device *dev) reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | - FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB); if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) reg_mcr &= ~FLEXCAN_MCR_FEN; @@ -1006,7 +1005,7 @@ static int flexcan_chip_start(struct net_device *dev) /* mark TX mailbox as INACTIVE */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, - &priv->tx_mb->can_ctrl); + ®s->mb[FLEXCAN_TX_MB].can_ctrl); /* acceptance mask/acceptance code (accept everything) */ priv->write(0x0, ®s->rxgmask); @@ -1365,11 +1364,9 @@ static int 
flexcan_probe(struct platform_device *pdev) priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; else priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; - priv->tx_mb_idx = FLEXCAN_TX_MB; - priv->tx_mb = ®s->mb[priv->tx_mb_idx]; priv->reg_imask1_default = 0; - priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); priv->offload.mailbox_read = flexcan_mailbox_read; From a4310fa2f24687888ce80fdb0e88583561a23700 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 31 Oct 2018 10:37:46 +0100 Subject: [PATCH 13/61] can: dev: can_get_echo_skb(): factor out non sending code to __can_get_echo_skb() This patch factors out all non sending parts of can_get_echo_skb() into a seperate function __can_get_echo_skb(), so that it can be re-used in an upcoming patch. Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 36 +++++++++++++++++++++++++----------- include/linux/can/dev.h | 1 + 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 49163570a63afa..80530ab37b1e3d 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -477,14 +477,7 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, } EXPORT_SYMBOL_GPL(can_put_echo_skb); -/* - * Get the skb from the stack and loop it back locally - * - * The function is typically called when the TX done interrupt - * is handled in the device driver. The driver must protect - * access to priv->echo_skb, if necessary. - */ -unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) { struct can_priv *priv = netdev_priv(dev); @@ -495,13 +488,34 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) struct can_frame *cf = (struct can_frame *)skb->data; u8 dlc = cf->can_dlc; - netif_rx(priv->echo_skb[idx]); + *len_ptr = dlc; priv->echo_skb[idx] = NULL; - return dlc; + return skb; } - return 0; + return NULL; +} + +/* + * Get the skb from the stack and loop it back locally + * + * The function is typically called when the TX done interrupt + * is handled in the device driver. The driver must protect + * access to priv->echo_skb, if necessary. + */ +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) +{ + struct sk_buff *skb; + u8 len; + + skb = __can_get_echo_skb(dev, idx, &len); + if (!skb) + return 0; + + netif_rx(skb); + + return len; } EXPORT_SYMBOL_GPL(can_get_echo_skb); diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index a83e1f632eb70e..f01623aef2f779 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); From 200f5c49f7a2cd694436bfc6cb0662b794c96736 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 31 Oct 2018 11:08:21 +0100 Subject: [PATCH 14/61] can: dev: __can_get_echo_skb(): replace struct can_frame by canfd_frame to access frame length This patch replaces the use of "struct can_frame::can_dlc" by "struct canfd_frame::len" to access the frame's length. 
As it is ensured that both structures have a compatible memory layout for this member this is no functional change. Futher, this compatibility is documented in a comment. Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 80530ab37b1e3d..46cc5fec4043d9 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -484,11 +484,14 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 BUG_ON(idx >= priv->echo_skb_max); if (priv->echo_skb[idx]) { + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. + */ struct sk_buff *skb = priv->echo_skb[idx]; - struct can_frame *cf = (struct can_frame *)skb->data; - u8 dlc = cf->can_dlc; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u8 len = cf->len; - *len_ptr = dlc; + *len_ptr = len; priv->echo_skb[idx] = NULL; return skb; From e7a6994d043a1e31d5b17706a22ce33d2a3e4cdc Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 31 Oct 2018 14:05:26 +0100 Subject: [PATCH 15/61] can: dev: __can_get_echo_skb(): Don't crash the kernel if can_priv::echo_skb is accessed out of bounds If the "struct can_priv::echo_skb" is accessed out of bounds would lead to a kernel crash. Better print a sensible warning message instead and try to recover. Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 46cc5fec4043d9..c05e4d50d43d74 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -481,7 +481,11 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 { struct can_priv *priv = netdev_priv(dev); - BUG_ON(idx >= priv->echo_skb_max); + if (idx >= priv->echo_skb_max) { + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", + __func__, idx, priv->echo_skb_max); + return NULL; + } if (priv->echo_skb[idx]) { /* Using "struct canfd_frame::len" for the frame From 7da11ba5c5066dadc2e96835a6233d56d7b7764a Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 31 Oct 2018 14:15:13 +0100 Subject: [PATCH 16/61] can: dev: __can_get_echo_skb(): print error message, if trying to echo non existing skb Prior to echoing a successfully transmitted CAN frame (by calling can_get_echo_skb()), CAN drivers have to put the CAN frame (by calling can_put_echo_skb() in the transmit function). These put and get function take an index as parameter, which is used to identify the CAN frame. A driver calling can_get_echo_skb() with a index not pointing to a skb is a BUG, so add an appropriate error message. Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index c05e4d50d43d74..3b3f88ffab53cd 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -480,6 +480,8 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) { struct can_priv *priv = netdev_priv(dev); + struct sk_buff *skb = priv->echo_skb[idx]; + struct canfd_frame *cf; if (idx >= priv->echo_skb_max) { netdev_err(dev, "%s: BUG! 
Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", @@ -487,21 +489,20 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 return NULL; } - if (priv->echo_skb[idx]) { - /* Using "struct canfd_frame::len" for the frame - * length is supported on both CAN and CANFD frames. - */ - struct sk_buff *skb = priv->echo_skb[idx]; - struct canfd_frame *cf = (struct canfd_frame *)skb->data; - u8 len = cf->len; - - *len_ptr = len; - priv->echo_skb[idx] = NULL; - - return skb; + if (!skb) { + netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", + __func__, idx); + return NULL; } - return NULL; + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. + */ + cf = (struct canfd_frame *)skb->data; + *len_ptr = cf->len; + priv->echo_skb[idx] = NULL; + + return skb; } /* From 55059f2b7f868cd43b3ad30e28e18347e1b46ace Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 18 Sep 2018 11:40:38 +0200 Subject: [PATCH 17/61] can: rx-offload: introduce can_rx_offload_get_echo_skb() and can_rx_offload_queue_sorted() functions Current CAN framework can't guarantee proper/chronological order of RX and TX-ECHO messages. To make this possible, drivers should use this functions instead of can_get_echo_skb(). Signed-off-by: Oleksij Rempel Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/rx-offload.c | 46 ++++++++++++++++++++++++++++++++++ include/linux/can/rx-offload.h | 4 +++ 2 files changed, 50 insertions(+) diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index c7d05027a7a07e..c368686e216455 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -211,6 +211,52 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp) +{ + struct can_rx_offload_cb *cb; + unsigned long flags; + + if (skb_queue_len(&offload->skb_queue) > + offload->skb_queue_len_max) + return -ENOMEM; + + cb = can_rx_offload_get_cb(skb); + cb->timestamp = timestamp; + + spin_lock_irqsave(&offload->skb_queue.lock, flags); + __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare); + spin_unlock_irqrestore(&offload->skb_queue.lock, flags); + + can_rx_offload_schedule(offload); + + return 0; +} +EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); + +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp) +{ + struct net_device *dev = offload->dev; + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + u8 len; + int err; + + skb = __can_get_echo_skb(dev, idx, &len); + if (!skb) + return 0; + + err = can_rx_offload_queue_sorted(offload, skb, timestamp); + if (err) { + stats->rx_errors++; + stats->tx_fifo_errors++; + } + + return len; +} +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); + int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) { if (skb_queue_len(&offload->skb_queue) > diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index cb31683bbe154a..01a7c9e5d8d85f 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -41,6 +41,10 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload * int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); int 
can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp); +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp); int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); void can_rx_offload_reset(struct can_rx_offload *offload); void can_rx_offload_del(struct can_rx_offload *offload); From ed72bc8bcb9277061e753faf300b20f97323761c Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 18 Sep 2018 11:40:39 +0200 Subject: [PATCH 18/61] can: flexcan: handle tx-complete CAN frames via rx-offload infrastructure Current flexcan driver will put TX-ECHO in regular unsorted way, in this case TX-ECHO can come after the response to the same TXed message. In some cases, for example for J1939 stack, things will break. This patch is using new rx-offload API to put the messages just in the right place. Signed-off-by: Oleksij Rempel Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 68b46395c580c8..41a175f80c4b5a 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -787,8 +787,11 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) /* transmission complete interrupt */ if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) { + u32 reg_ctrl = priv->read(®s->mb[FLEXCAN_TX_MB].can_ctrl); + handled = IRQ_HANDLED; - stats->tx_bytes += can_get_echo_skb(dev, 0); + stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, + 0, reg_ctrl << 16); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); From 4530ec36bb1e0d24f41c33229694adacda3d5d89 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 18 Sep 2018 11:40:40 +0200 Subject: [PATCH 19/61] can: rx-offload: rename can_rx_offload_irq_queue_err_skb() to can_rx_offload_queue_tail() This function has nothing todo with error. 
Signed-off-by: Oleksij Rempel Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 4 ++-- drivers/net/can/rx-offload.c | 5 +++-- include/linux/can/rx-offload.h | 3 ++- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 41a175f80c4b5a..5923bd0ec11830 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -610,7 +610,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - can_rx_offload_irq_queue_err_skb(&priv->offload, skb); + can_rx_offload_queue_tail(&priv->offload, skb); } static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) @@ -650,7 +650,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - can_rx_offload_irq_queue_err_skb(&priv->offload, skb); + can_rx_offload_queue_tail(&priv->offload, skb); } static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index c368686e216455..2ce4fa8698c73b 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -257,7 +257,8 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, } EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb); -int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb) +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb) { if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) @@ -268,7 +269,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b return 0; } -EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb); +EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail); static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight) { diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index 01a7c9e5d8d85f..8268811a697e25 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -45,7 +45,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp); unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, unsigned int idx, u32 timestamp); -int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb); void can_rx_offload_reset(struct can_rx_offload *offload); void can_rx_offload_del(struct can_rx_offload *offload); void can_rx_offload_enable(struct can_rx_offload *offload); From d788905f68fd4714c82936f6f7f1d3644d7ae7ef Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 18 Sep 2018 11:40:41 +0200 Subject: [PATCH 20/61] can: flexcan: use can_rx_offload_queue_sorted() for flexcan_irq_bus_*() Currently, in case of bus error, driver will generate error message and put in the tail of the message queue. To avoid confusions, this change should place the bus related messages in proper order. 
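A conceptual standalone sketch (not the rx-offload code itself) of the difference this makes: appending at the tail can place an error frame after RX frames that were actually received later, while a timestamp-sorted insert keeps it in sequence:

  #include <stdio.h>
  #include <stdint.h>

  struct entry { uint32_t timestamp; const char *what; };

  static void insert_sorted(struct entry *q, int *n, struct entry e)
  {
          int i = *n;

          while (i > 0 && q[i - 1].timestamp > e.timestamp) {
                  q[i] = q[i - 1];        /* shift later entries back */
                  i--;
          }
          q[i] = e;
          (*n)++;
  }

  int main(void)
  {
          struct entry q[4] = { { 0x1000, "rx frame A" },
                                { 0x3000, "rx frame B" } };
          int n = 2;

          /* bus error captured at 0x2000: the sorted insert lands it between
           * the two RX frames instead of after both of them
           */
          insert_sorted(q, &n, (struct entry){ 0x2000, "bus error" });

          for (int i = 0; i < n; i++)
                  printf("0x%04x %s\n", (unsigned int)q[i].timestamp,
                         q[i].what);
          return 0;
  }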
Signed-off-by: Oleksij Rempel Cc: linux-stable Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 5923bd0ec11830..75ce11395ee819 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -561,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct sk_buff *skb; struct can_frame *cf; bool rx_errors = false, tx_errors = false; + u32 timestamp; + + timestamp = priv->read(®s->timer) << 16; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) @@ -610,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - can_rx_offload_queue_tail(&priv->offload, skb); + can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); } static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; struct sk_buff *skb; struct can_frame *cf; enum can_state new_state, rx_state, tx_state; int flt; struct can_berr_counter bec; + u32 timestamp; + + timestamp = priv->read(®s->timer) << 16; flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { @@ -650,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - can_rx_offload_queue_tail(&priv->offload, skb); + can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); } static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) From f4156f9656feac21f4de712fac94fae964c5d402 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Tue, 30 Oct 2018 12:17:10 +0100 Subject: [PATCH 21/61] batman-adv: Use explicit tvlv padding for ELP packets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The announcement messages of batman-adv COMPAT_VERSION 15 have the possibility to announce additional information via a dynamic TVLV part. This part is optional for the ELP packets and currently not parsed by the Linux implementation. Still out-of-tree versions are using it to transport things like neighbor hashes to optimize the rebroadcast behavior. Since the ELP broadcast packets are smaller than the minimal ethernet packet, it often has to be padded. This is often done (as specified in RFC894) with octets of zero and thus work perfectly fine with the TVLV part (making it a zero length and thus empty). But not all ethernet compatible hardware seems to follow this advice. To avoid ambiguous situations when parsing the TVLV header, just force the 4 bytes (TVLV length + padding) after the required ELP header to zero. 
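A conceptual standalone sketch (not batman-adv code; the 2-byte big-endian length right after the fixed header is a simplification) of why non-zero padding is ambiguous for a receiver that does parse the optional TVLV part:

  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>

  static uint16_t parse_tvlv_len(const uint8_t *after_hdr)
  {
          /* hypothetical parser: big-endian length field after the header */
          return (uint16_t)(after_hdr[0] << 8 | after_hdr[1]);
  }

  int main(void)
  {
          uint8_t zero_pad[4] = { 0 };    /* what the patch now forces */
          uint8_t junk_pad[4];

          memset(junk_pad, 0xa5, sizeof(junk_pad));   /* odd hardware padding */

          printf("zero padding -> tvlv len %u (empty, unambiguous)\n",
                 parse_tvlv_len(zero_pad));
          printf("junk padding -> tvlv len %u (bogus)\n",
                 parse_tvlv_len(junk_pad));
          return 0;
  }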
Fixes: d6f94d91f766 ("batman-adv: ELP - adding basic infrastructure") Reported-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/bat_v_elp.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 9f481cfdf77dac..e8090f099eb805 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -352,19 +352,21 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) */ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface) { + static const size_t tvlv_padding = sizeof(__be32); struct batadv_elp_packet *elp_packet; unsigned char *elp_buff; u32 random_seqno; size_t size; int res = -ENOMEM; - size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN; + size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding; hard_iface->bat_v.elp_skb = dev_alloc_skb(size); if (!hard_iface->bat_v.elp_skb) goto out; skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN); - elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN); + elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, + BATADV_ELP_HLEN + tvlv_padding); elp_packet = (struct batadv_elp_packet *)elp_buff; elp_packet->packet_type = BATADV_ELP; From d7d8bbb40a5b1f682ee6589e212934f4c6b8ad60 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 7 Nov 2018 23:09:12 +0100 Subject: [PATCH 22/61] batman-adv: Expand merged fragment buffer for full packet MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The complete size ("total_size") of the fragmented packet is stored in the fragment header and in the size of the fragment chain. When the fragments are ready for merge, the skbuff's tail of the first fragment is expanded to have enough room after the data pointer for at least total_size. This means that it gets expanded by total_size - first_skb->len. But this is ignoring the fact that after expanding the buffer, the fragment header is pulled by from this buffer. Assuming that the tailroom of the buffer was already 0, the buffer after the data pointer of the skbuff is now only total_size - len(fragment_header) large. When the merge function is then processing the remaining fragments, the code to copy the data over to the merged skbuff will cause an skb_over_panic when it tries to actually put enough data to fill the total_size bytes of the packet. The size of the skb_pull must therefore also be taken into account when the buffer's tailroom is expanded. Fixes: 610bfc6bc99b ("batman-adv: Receive fragmented packets and merge") Reported-by: Martin Weinelt Co-authored-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/fragmentation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 0fddc17106bd8a..5b71a289d04fc8 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain) kfree(entry); packet = (struct batadv_frag_packet *)skb_out->data; - size = ntohs(packet->total_size); + size = ntohs(packet->total_size) + hdr_size; /* Make room for the rest of the fragments. 
*/ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { From 77e461d14ed141253573eeeb4d34eccc51e38328 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sun, 11 Nov 2018 18:27:34 -0800 Subject: [PATCH 23/61] bnx2x: Assign unique DMAE channel number for FW DMAE transactions. Driver assigns DMAE channel 0 for FW as part of START_RAMROD command. FW uses this channel for DMAE operations (e.g., TIME_SYNC implementation). Driver also uses the same channel 0 for DMAE operations for some of the PFs (e.g., PF0 on Port0). This could lead to concurrent access to the DMAE channel by FW and driver which is not legal. Hence need to assign unique DMAE id for FW. Currently following DMAE channels are used by the clients, MFW - OCBB/OCSD functionality uses DMAE channel 14/15 Driver 0-3 and 8-11 (for PF dmae operations) 4 and 12 (for stats requests) Assigning unique dmae_id '13' to the FW. Changes from previous version: ------------------------------ v2: Incorporated the review comments. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 7 +++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 1 + 2 files changed, 8 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index be1506169076f0..0de487a8f0eb22 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ E1HVN_MAX) +/* Following is the DMAE channel number allocation for the clients. + * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively. + * Driver: 0-3 and 8-11 (for PF dmae operations) + * 4 and 12 (for stats requests) + */ +#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */ + /* PCIE link and speed */ #define PCICFG_LINK_WIDTH 0x1f00000 #define PCICFG_LINK_WIDTH_SHIFT 20 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 3f4d2c8da21a3a..a9eaaf3e73a4c4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; + rdata->dmae_cmd_id = BNX2X_FW_DMAE_C; rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); From 9aaa4e8ba12972d674caeefbc5f88d83235dd697 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Mon, 12 Nov 2018 12:50:20 +0200 Subject: [PATCH 24/61] qed: Fix PTT leak in qed_drain() Release PTT before entering error flow. Signed-off-by: Denis Bolotin Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 35fd0db6a67777..fff7f04d4525c5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1782,9 +1782,9 @@ static int qed_drain(struct qed_dev *cdev) return -EBUSY; } rc = qed_mcp_drain(hwfn, ptt); + qed_ptt_release(hwfn, ptt); if (rc) return rc; - qed_ptt_release(hwfn, ptt); } return 0; From e90202ed1cf9672c48a363c84a929932ebfe6fc0 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Mon, 12 Nov 2018 12:50:21 +0200 Subject: [PATCH 25/61] qed: Fix overriding offload_tc by protocols without APP TLV The TC received from APP TLV is stored in offload_tc, and should not be set by protocols which did not receive an APP TLV. Fixed the condition when overriding the offload_tc. Signed-off-by: Denis Bolotin Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 8e8fa823d61187..69966dfc6e3d12 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) static void qed_dcbx_set_params(struct qed_dcbx_results *p_data, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool enable, u8 prio, u8 tc, + bool app_tlv, bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type, enum qed_pci_personality personality) { @@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, p_data->arr[type].dont_add_vlan0 = true; /* QM reconf data */ - if (p_hwfn->hw_info.personality == personality) + if (app_tlv && p_hwfn->hw_info.personality == personality) qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); /* Configure dcbx vlan priority in doorbell block for roce EDPM */ @@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, static void qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool enable, u8 prio, u8 tc, + bool app_tlv, bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type) { enum qed_pci_personality personality; @@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, personality = qed_dcbx_app_update[i].personality; - qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable, prio, tc, type, personality); } } @@ -319,8 +319,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enable = true; } - qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, - priority, tc, type); + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true, + enable, priority, tc, type); } } @@ -341,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, continue; enable = (type == DCBX_PROTOCOL_ETH) ? 
false : !!dcbx_version; - qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable, priority, tc, type); } From 291d57f67d2449737d1e370ab5b9a583818eaa0c Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 12 Nov 2018 12:50:22 +0200 Subject: [PATCH 26/61] qed: Fix rdma_info structure allocation Certain flows need to access the rdma-info structure, for example dcbx update flows. In some cases there can be a race between the allocation or deallocation of the structure which was done in roce start / roce stop and an asynchrounous dcbx event that tries to access the structure. For this reason, we move the allocation of the rdma_info structure to be similar to the iscsi/fcoe info structures which are allocated during device setup. We add a new field of "active" to the struct to define whether roce has already been started or not, and this is checked instead of whether the pointer to the info structure. Fixes: 51ff17251c9c ("qed: Add support for RoCE hw init") Signed-off-by: Michal Kalderon Signed-off-by: Denis Bolotin Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 15 +++++-- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 50 +++++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 5 +++ 3 files changed, 45 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 7ceb2b97538d25..cff141077558c8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev) qed_iscsi_free(p_hwfn); qed_ooo_free(p_hwfn); } + + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) + qed_rdma_info_free(p_hwfn); + qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); @@ -1081,6 +1085,12 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_err; } + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + rc = qed_rdma_info_alloc(p_hwfn); + if (rc) + goto alloc_err; + } + /* DMA info initialization */ rc = qed_dmae_info_alloc(p_hwfn); if (rc) @@ -2102,11 +2112,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) if (!p_ptt) return -EAGAIN; - /* If roce info is allocated it means roce is initialized and should - * be enabled in searcher. 
- */ if (p_hwfn->p_rdma_info && - p_hwfn->b_rdma_enabled_in_prs) + p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs) qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); /* Re-open incoming traffic */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 62113438c8809c..7873d6dfd91f55 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; } -static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_rdma_start_in_params *params) +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info; - u32 num_cons, num_tasks; - int rc = -ENOMEM; - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); - - /* Allocate a struct with current pf rdma info */ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); if (!p_rdma_info) - return rc; + return -ENOMEM; + + spin_lock_init(&p_rdma_info->lock); p_hwfn->p_rdma_info = p_rdma_info; + return 0; +} + +void qed_rdma_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->p_rdma_info); + p_hwfn->p_rdma_info = NULL; +} + +static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + u32 num_cons, num_tasks; + int rc = -ENOMEM; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) p_rdma_info->proto = PROTOCOLID_IWARP; else @@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, /* Allocate a struct with device params and fill it */ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); if (!p_rdma_info->dev) - goto free_rdma_info; + return rc; /* Allocate a struct with port params and fill it */ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); @@ -298,8 +310,6 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, kfree(p_rdma_info->port); free_rdma_dev: kfree(p_rdma_info->dev); -free_rdma_info: - kfree(p_rdma_info); return rc; } @@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) kfree(p_rdma_info->port); kfree(p_rdma_info->dev); - - kfree(p_rdma_info); } static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) @@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); - spin_lock_init(&p_hwfn->p_rdma_info->lock); - qed_rdma_init_devinfo(p_hwfn, params); qed_rdma_init_port(p_hwfn); qed_rdma_init_events(p_hwfn, params); @@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt) /* Disable RoCE search */ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); p_hwfn->b_rdma_enabled_in_prs = false; - + p_hwfn->p_rdma_info->active = 0; qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); @@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt, u8 max_stats_queues; int rc; - if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) { + if (!rdma_cxt || !in_params || !out_params || + !p_hwfn->p_rdma_info->active) { DP_ERR(p_hwfn->cdev, "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", rdma_cxt, in_params, out_params); @@ -1802,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) { bool result; - /* if rdma info has not been allocated, naturally there are no qps */ - if 
(!p_hwfn->p_rdma_info) + /* if rdma wasn't activated yet, naturally there are no qps */ + if (!p_hwfn->p_rdma_info->active) return false; spin_lock_bh(&p_hwfn->p_rdma_info->lock); @@ -1849,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt, if (!p_ptt) goto err; - rc = qed_rdma_alloc(p_hwfn, p_ptt, params); + rc = qed_rdma_alloc(p_hwfn); if (rc) goto err1; @@ -1858,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt, goto err2; qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_rdma_info->active = 1; return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 6f722ee8ee945b..50d609c0e108d8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -102,6 +102,7 @@ struct qed_rdma_info { u16 max_queue_zones; enum protocol_type proto; struct qed_iwarp_info iwarp; + u8 active:1; }; struct qed_rdma_qp { @@ -176,10 +177,14 @@ struct qed_rdma_qp { #if IS_ENABLED(CONFIG_QED_RDMA) void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn); +void qed_rdma_info_free(struct qed_hwfn *p_hwfn); #else static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL} +static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {} #endif int From ed4eac20dcffdad47709422e0cb925981b056668 Mon Sep 17 00:00:00 2001 From: Denis Bolotin Date: Mon, 12 Nov 2018 12:50:23 +0200 Subject: [PATCH 27/61] qed: Fix reading wrong value in loop condition The value of "sb_index" is written by the hardware. Reading its value and writing it to "index" must finish before checking the loop condition. Signed-off-by: Denis Bolotin Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_int.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 0f0aba793352c4..b22f464ea3fa77 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) */ do { index = p_sb_attn->sb_index; + /* finish reading index before the loop condition */ + dma_rmb(); attn_bits = le32_to_cpu(p_sb_attn->atten_bits); attn_acks = le32_to_cpu(p_sb_attn->atten_ack); } while (index != p_sb_attn->sb_index); From 007b656851ed7f94ba0fa358ac3e5d7705da6846 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Mon, 12 Nov 2018 17:06:12 +0100 Subject: [PATCH 28/61] s390/ism: clear dmbe_mask bit before SMC IRQ handling SMC-D stress workload showed connection stalls. Since the firmware decides to skip raising an interrupt if the SBA DMBE mask bit is still set, this SBA DMBE mask bit should be cleared before the IRQ handling in the SMC code runs. Otherwise there are small windows possible with missing interrupts for incoming data. SMC-D currently does not care about the old value of the SBA DMBE mask. Acked-by: Sebastian Ott Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/ism_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index f96ec68af2e58a..dcbf5c85774378 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -415,9 +415,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data) break; clear_bit_inv(bit, bv); + ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; barrier(); smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); - ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; } if (ism->sba->e) { From f8504f4ca0a0e9f84546ef86e00b24d2ea9a0bd2 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 13 Nov 2018 01:08:25 +0800 Subject: [PATCH 29/61] l2tp: fix a sock refcnt leak in l2tp_tunnel_register This issue happens when trying to add an existent tunnel. It doesn't call sock_put() before returning -EEXIST to release the sock refcnt that was held by calling sock_hold() before the existence check. This patch is to fix it by holding the sock after doing the existence check. Fixes: f6cd651b056f ("l2tp: fix race in duplicate tunnel detection") Reported-by: Jianlin Shi Signed-off-by: Xin Long Reviewed-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 82cdf9020b5392..26f1d435696a62 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, goto err_sock; } - sk = sock->sk; - - sock_hold(sk); - tunnel->sock = sk; tunnel->l2tp_net = net; - pn = l2tp_pernet(net); spin_lock_bh(&pn->l2tp_tunnel_list_lock); @@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); spin_unlock_bh(&pn->l2tp_tunnel_list_lock); + sk = sock->sk; + sock_hold(sk); + tunnel->sock = sk; + if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { struct udp_tunnel_sock_cfg udp_cfg = { .sk_user_data = tunnel, From 6ba990384e924476b5eed1734f3bcca0df6fd77e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 15 Nov 2018 03:25:37 -0500 Subject: [PATCH 30/61] bnxt_en: Fix RSS context allocation. Recent commit has added the reservation of RSS context. This requires bnxt_hwrm_vnic_qcaps() to be called before allocating any RSS contexts. The bnxt_hwrm_vnic_qcaps() call sets up proper flags that will determine how many RSS contexts to allocate to support NTUPLE. This causes a regression that too many RSS contexts are being reserved and causing resource shortage when enabling many VFs. Fix it by calling bnxt_hwrm_vnic_qcaps() earlier. Fixes: 41e8d7983752 ("bnxt_en: Modify the ring reservation functions for 57500 series chips.") Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index dd85d790f63893..4a45a2b809eacd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -10087,6 +10087,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_vnic_qcaps(bp); bnxt_hwrm_port_led_qcaps(bp); bnxt_ethtool_init(bp); bnxt_dcb_init(bp); @@ -10120,7 +10121,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; } - bnxt_hwrm_vnic_qcaps(bp); if (bnxt_rfs_supported(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { From d19819297d9284bd990e22116b8b43d0abcbf488 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 15 Nov 2018 03:25:38 -0500 Subject: [PATCH 31/61] bnxt_en: Fix rx_l4_csum_errors counter on 57500 devices. The software counter structure is defined in both the CP ring's structure and the NQ ring's structure on the new devices. The legacy code adds the counter to the CP ring's structure and the counter won't get displayed since the ethtool code is looking at the NQ ring's structure. Since all other counters are contained in the NQ ring's structure, it makes more sense to count rx_l4_csum_errors in the NQ. Fixes: 50e3ab7836b5 ("bnxt_en: Allocate completion ring structures for 57500 series chips.") Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4a45a2b809eacd..585609990eee0b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1675,7 +1675,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } else { if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { if (dev->features & NETIF_F_RXCSUM) - cpr->rx_l4_csum_errors++; + bnapi->cp_ring.rx_l4_csum_errors++; } } From addd4df6d763556e16d5316e4e8cd441050cc2af Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 15 Nov 2018 03:25:39 -0500 Subject: [PATCH 32/61] bnxt_en: Disable RDMA support on the 57500 chips. There is no RDMA support on 57500 chips yet, so prevent bnxt_re from registering on these chips. There is intermittent failure if bnxt_re is allowed to register and proceed with RDMA operations. Fixes: 1ab968d2f1d6 ("bnxt_en: Add PCI ID for BCM57508 device.") Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index beee61292d5e52..b59b382d34f942 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -43,6 +43,9 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, if (ulp_id == BNXT_ROCE_ULP) { unsigned int max_stat_ctxs; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return -EOPNOTSUPP; + max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS || bp->num_stat_ctxs == max_stat_ctxs) From ffd77621700ec3adcf859681e24910c38e0931f5 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 15 Nov 2018 03:25:40 -0500 Subject: [PATCH 33/61] bnxt_en: Workaround occasional TX timeout on 57500 A0. Hardware can sometimes not generate NQ MSIX with a single pending CP ring entry. This seems to always happen at the last entry of the CP ring before it wraps. Add logic to check all the CP rings for pending entries without the CP ring consumer index advancing. Calling HWRM_DBG_RING_INFO_GET to read the context of the CP ring will flush out the NQ entry and MSIX. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 65 +++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 ++ 2 files changed, 68 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 585609990eee0b..5d4147a75cadc1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -8714,6 +8714,26 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) return rc; } +static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, + u32 ring_id, u32 *prod, u32 *cons) +{ + struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_dbg_ring_info_get_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); + req.ring_type = ring_type; + req.fw_ring_id = cpu_to_le32(ring_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + *prod = le32_to_cpu(resp->producer_index); + *cons = le32_to_cpu(resp->consumer_index); + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; @@ -8821,6 +8841,11 @@ static void bnxt_timer(struct timer_list *t) bnxt_queue_sp_work(bp); } } + + if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) { + set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -8851,6 +8876,43 @@ static void bnxt_reset(struct bnxt *bp, bool silent) bnxt_rtnl_unlock_sp(bp); } +static void bnxt_chk_missed_irq(struct bnxt *bp) +{ + int i; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + u32 fw_ring_id; + int j; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + for (j = 0; j < 2; j++) { + struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j]; + u32 val[2]; + + if (!cpr2 || cpr2->has_more_work || + !bnxt_has_work(bp, cpr2)) + continue; + + if (cpr2->cp_raw_cons != 
cpr2->last_cp_raw_cons) { + cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; + continue; + } + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[0], &val[1]); + } + } +} + static void bnxt_cfg_ntp_filters(struct bnxt *); static void bnxt_sp_task(struct work_struct *work) @@ -8930,6 +8992,9 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) bnxt_tc_flow_stats_work(bp); + if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) + bnxt_chk_missed_irq(bp); + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They * must be the last functions to be called before exiting. */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 498b373c992d37..00bd17e55e99b9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -798,6 +798,8 @@ struct bnxt_cp_ring_info { u8 had_work_done:1; u8 has_more_work:1; + u32 last_cp_raw_cons; + struct bnxt_coal rx_ring_coal; u64 rx_packets; u64 rx_bytes; @@ -1527,6 +1529,7 @@ struct bnxt { #define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 #define BNXT_FLOW_STATS_SP_EVENT 15 #define BNXT_UPDATE_PHY_SP_EVENT 16 +#define BNXT_RING_COAL_NOW_SP_EVENT 17 struct bnxt_hw_resc hw_resc; struct bnxt_pf_info pf; From 83eb5c5cff32681f3769f502cb5589c7d7509bfe Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 15 Nov 2018 03:25:41 -0500 Subject: [PATCH 34/61] bnxt_en: Add software "missed_irqs" counter. To keep track of the number of times the workaround code for 57500 A0 has been triggered. This is a per NQ counter. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5 ++++- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5d4147a75cadc1..d4c30011752992 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -8909,6 +8909,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp) bnxt_dbg_hwrm_ring_info_get(bp, DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, fw_ring_id, &val[0], &val[1]); + cpr->missed_irqs++; } } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 00bd17e55e99b9..9e99d4ab3e062f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -818,6 +818,7 @@ struct bnxt_cp_ring_info { dma_addr_t hw_stats_map; u32 hw_stats_ctx_id; u64 rx_l4_csum_errors; + u64 missed_irqs; struct bnxt_ring_struct cp_ring_struct; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 48078564f0258c..4b734cd81f8b16 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -137,7 +137,7 @@ static int bnxt_set_coalesce(struct net_device *dev, return rc; } -#define BNXT_NUM_STATS 21 +#define BNXT_NUM_STATS 22 #define BNXT_RX_STATS_ENTRY(counter) \ { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } @@ -384,6 +384,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (k = 0; k < stat_fields; j++, k++) buf[j] = le64_to_cpu(hw_stats[k]); buf[j++] = cpr->rx_l4_csum_errors; + buf[j++] = 
cpr->missed_irqs; bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += le64_to_cpu(cpr->hw_stats->rx_discard_pkts); @@ -468,6 +469,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) buf += ETH_GSTRING_LEN; sprintf(buf, "[%d]: rx_l4_csum_errors", i); buf += ETH_GSTRING_LEN; + sprintf(buf, "[%d]: missed_irqs", i); + buf += ETH_GSTRING_LEN; } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { strcpy(buf, bnxt_sw_func_stats[i].string); From 8dc5ae2d48976764cf3498e97963fa06befefb0e Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Thu, 15 Nov 2018 03:25:42 -0500 Subject: [PATCH 35/61] bnxt_en: Fix filling time in bnxt_fill_coredump_record() Fix the year and month offset while storing it in bnxt_fill_coredump_record(). Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 4b734cd81f8b16..6cc69a58478a5f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2945,8 +2945,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, record->asic_state = 0; strlcpy(record->system_name, utsname()->nodename, sizeof(record->system_name)); - record->year = cpu_to_le16(tm.tm_year); - record->month = cpu_to_le16(tm.tm_mon); + record->year = cpu_to_le16(tm.tm_year + 1900); + record->month = cpu_to_le16(tm.tm_mon + 1); record->day = cpu_to_le16(tm.tm_mday); record->hour = cpu_to_le16(tm.tm_hour); record->minute = cpu_to_le16(tm.tm_min); From b8c3c10cf68d7556466bb7d99f249ed586ddfbe3 Mon Sep 17 00:00:00 2001 From: Thor Thayer Date: Mon, 12 Nov 2018 11:50:56 -0600 Subject: [PATCH 36/61] MAINTAINERS: Replace Vince Bridgers as Altera TSE maintainer Vince has moved to a different role. Replace him as Altera TSE maintainer. Signed-off-by: Thor Thayer Acked-by: Vince Bridgers Acked-by: Alan Tull Signed-off-by: David S. Miller --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 0abecc528daca1..5a4bd37d9d02ab 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -717,7 +717,7 @@ F: include/linux/mfd/altera-a10sr.h F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h ALTERA TRIPLE SPEED ETHERNET DRIVER -M: Vince Bridgers +M: Thor Thayer L: netdev@vger.kernel.org L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) S: Maintained From ebcd210e93b2a984b7a7b82d45f7f0d21b7ec2d2 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 15 Nov 2018 15:36:21 +0530 Subject: [PATCH 37/61] cxgb4: fix thermal zone build error with CONFIG_THERMAL=m and cxgb4 as built-in build fails, and 'commit e70a57fa59bb ("cxgb4: fix thermal configuration dependencies")' tries to fix it but when cxgb4i is made built-in build fails again, use IS_REACHABLE instead of IS_ENABLED to fix the issue. Fixes: e70a57fa59bb (cxgb4: fix thermal configuration dependencies) Reported-by: Randy Dunlap Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/Kconfig | 1 - drivers/net/ethernet/chelsio/cxgb4/Makefile | 4 +--- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 ++-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index 75c1c5ed238784..e2cdfa75673fd5 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -67,7 +67,6 @@ config CHELSIO_T3 config CHELSIO_T4 tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) - depends on THERMAL || !THERMAL select FW_LOADER select MDIO select ZLIB_DEFLATE diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 78e5d17a1d5fb2..91d8a885deba9b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -12,6 +12,4 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o -ifdef CONFIG_THERMAL -cxgb4-objs += cxgb4_thermal.o -endif +cxgb4-$(CONFIG_THERMAL) += cxgb4_thermal.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 05a46926016a5e..d49db46254cd7d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5863,7 +5863,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!is_t4(adapter->params.chip)) cxgb4_ptp_init(adapter); - if (IS_ENABLED(CONFIG_THERMAL) && + if (IS_REACHABLE(CONFIG_THERMAL) && !is_t4(adapter->params.chip) && (adapter->flags & FW_OK)) cxgb4_thermal_init(adapter); @@ -5932,7 +5932,7 @@ static void remove_one(struct pci_dev *pdev) if (!is_t4(adapter->params.chip)) cxgb4_ptp_stop(adapter); - if (IS_ENABLED(CONFIG_THERMAL)) + if (IS_REACHABLE(CONFIG_THERMAL)) cxgb4_thermal_remove(adapter); /* If we allocated filters, free up state associated with any From 7150ceaacb27f7b3bf494e72cd4be4e11612dfff Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 12 Nov 2018 22:33:22 +0000 Subject: [PATCH 38/61] rxrpc: Fix life check The life-checking function, which is used by kAFS to make sure that a call is still live in the event of a pending signal, only samples the received packet serial number counter; it doesn't actually provoke a change in the counter, rather relying on the server to happen to give us a packet in the time window. Fix this by adding a function to force a ping to be transmitted. kAFS then keeps track of whether there's been a stall, and if so, uses the new function to ping the server, resetting the timeout to allow the reply to come back. If there's a stall, a ping and the call is *still* stalled in the same place after another period, then the call will be aborted. Fixes: bc5e3a546d55 ("rxrpc: Use MSG_WAITALL to tell sendmsg() to temporarily ignore signals") Fixes: f4d15fb6f99a ("rxrpc: Provide functions for allowing cleaner handling of signals") Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- Documentation/networking/rxrpc.txt | 17 +++++++++++------ fs/afs/rxrpc.c | 11 ++++++++++- include/net/af_rxrpc.h | 3 ++- include/trace/events/rxrpc.h | 2 ++ net/rxrpc/af_rxrpc.c | 27 +++++++++++++++++++++++---- 5 files changed, 48 insertions(+), 12 deletions(-) diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index 605e00cdd6beb1..89f1302d593a5c 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -1056,18 +1056,23 @@ The kernel interface functions are as follows: u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call); + void rxrpc_kernel_probe_life(struct socket *sock, + struct rxrpc_call *call); - This returns a number that is updated when ACKs are received from the peer - (notably including PING RESPONSE ACKs which we can elicit by sending PING - ACKs to see if the call still exists on the server). The caller should - compare the numbers of two calls to see if the call is still alive after - waiting for a suitable interval. + The first function returns a number that is updated when ACKs are received + from the peer (notably including PING RESPONSE ACKs which we can elicit by + sending PING ACKs to see if the call still exists on the server). The + caller should compare the numbers of two calls to see if the call is still + alive after waiting for a suitable interval. This allows the caller to work out if the server is still contactable and if the call is still alive on the server whilst waiting for the server to process a client operation. - This function may transmit a PING ACK. + The second function causes a ping ACK to be transmitted to try to provoke + the peer into responding, which would then cause the value returned by the + first function to change. Note that this must be called in TASK_RUNNING + state. (*) Get reply timestamp. 
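As a rough usage sketch (editor's illustration only, not part of the patch): a kernel service waiting for a reply could sample rxrpc_kernel_check_life() periodically and, on a stall, poke the peer once with rxrpc_kernel_probe_life() before giving up. The helper name my_wait_for_reply(), the completion test my_call_done() and the rtt2 probe interval below are hypothetical placeholders; the real kAFS implementation is the fs/afs/rxrpc.c hunk that follows.

#include <linux/sched.h>
#include <net/af_rxrpc.h>

/* Hypothetical: however the service tracks call completion. */
static bool my_call_done(struct rxrpc_call *rxcall);

static void my_wait_for_reply(struct socket *sock, struct rxrpc_call *rxcall,
			      signed long rtt2)
{
	u32 life, last_life = rxrpc_kernel_check_life(sock, rxcall);
	signed long timeout = rtt2;
	bool stalled = false;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (my_call_done(rxcall))
			break;

		life = rxrpc_kernel_check_life(sock, rxcall);
		if (life != last_life) {
			/* The peer ACKed something: the call is still alive. */
			last_life = life;
			timeout = rtt2;
			stalled = false;
		} else if (timeout == 0) {
			if (stalled)
				break;	/* no reaction to our ping: caller would abort */
			/* Provoke a PING ACK; must be done in TASK_RUNNING state. */
			__set_current_state(TASK_RUNNING);
			rxrpc_kernel_probe_life(sock, rxcall);
			timeout = rtt2;
			stalled = true;
			continue;
		}

		timeout = schedule_timeout(timeout);
	}

	__set_current_state(TASK_RUNNING);
}
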
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 59970886690f1b..a7b44863d502e9 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -576,6 +576,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, { signed long rtt2, timeout; long ret; + bool stalled = false; u64 rtt; u32 life, last_life; @@ -609,12 +610,20 @@ static long afs_wait_for_call_to_complete(struct afs_call *call, life = rxrpc_kernel_check_life(call->net->socket, call->rxcall); if (timeout == 0 && - life == last_life && signal_pending(current)) + life == last_life && signal_pending(current)) { + if (stalled) break; + __set_current_state(TASK_RUNNING); + rxrpc_kernel_probe_life(call->net->socket, call->rxcall); + timeout = rtt2; + stalled = true; + continue; + } if (life != last_life) { timeout = rtt2; last_life = life; + stalled = false; } timeout = schedule_timeout(timeout); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index de587948042a4a..1adefe42c0a689 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *, struct key *); int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, enum rxrpc_call_completion *, u32 *); -u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *); +u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); +void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, ktime_t *); diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 573d5b901fb118..5b50fe4906d2ef 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -181,6 +181,7 @@ enum rxrpc_timer_trace { enum rxrpc_propose_ack_trace { rxrpc_propose_ack_client_tx_end, rxrpc_propose_ack_input_data, + rxrpc_propose_ack_ping_for_check_life, rxrpc_propose_ack_ping_for_keepalive, rxrpc_propose_ack_ping_for_lost_ack, rxrpc_propose_ack_ping_for_lost_reply, @@ -380,6 +381,7 @@ enum rxrpc_tx_point { #define rxrpc_propose_ack_traces \ EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ EM(rxrpc_propose_ack_input_data, "DataIn ") \ + EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \ EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \ EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 64362d078da846..a2522f9d71e266 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -375,16 +375,35 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call); * getting ACKs from the server. Returns a number representing the life state * which can be compared to that returned by a previous call. * - * If this is a client call, ping ACKs will be sent to the server to find out - * whether it's still responsive and whether the call is still alive on the - * server. + * If the life state stalls, rxrpc_kernel_probe_life() should be called and + * then 2RTT waited. 
*/ -u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call) +u32 rxrpc_kernel_check_life(const struct socket *sock, + const struct rxrpc_call *call) { return call->acks_latest; } EXPORT_SYMBOL(rxrpc_kernel_check_life); +/** + * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive + * @sock: The socket the call is on + * @call: The call to check + * + * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to + * find out whether a call is still alive by pinging it. This should cause the + * life state to be bumped in about 2*RTT. + * + * The must be called in TASK_RUNNING state on pain of might_sleep() objecting. + */ +void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call) +{ + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, + rxrpc_propose_ack_ping_for_check_life); + rxrpc_send_ack_packet(call, true, NULL); +} +EXPORT_SYMBOL(rxrpc_kernel_probe_life); + /** * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call. * @sock: The socket the call is on From 08e14fe429a07475ee9f29a283945d602e4a6d92 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 12 Nov 2018 16:17:16 -0800 Subject: [PATCH 39/61] net_sched: sch_fq: ensure maxrate fq parameter applies to EDT flows When EDT conversion happened, fq lost the ability to enfore a maxrate for all flows. It kept it for non EDT flows. This commit restores the functionality. Tested: tc qd replace dev eth0 root fq maxrate 500Mbit netperf -P0 -H host -- -O THROUGHPUT 489.75 Fixes: ab408b6dc744 ("tcp: switch tcp and sch_fq to new earliest departure time model") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/sched/sch_fq.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 4b1af706896c07..25a7cf6d380fd1 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -469,22 +469,29 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch) goto begin; } prefetch(&skb->end); - f->credit -= qdisc_pkt_len(skb); + plen = qdisc_pkt_len(skb); + f->credit -= plen; - if (ktime_to_ns(skb->tstamp) || !q->rate_enable) + if (!q->rate_enable) goto out; rate = q->flow_max_rate; - if (skb->sk) - rate = min(skb->sk->sk_pacing_rate, rate); - - if (rate <= q->low_rate_threshold) { - f->credit = 0; - plen = qdisc_pkt_len(skb); - } else { - plen = max(qdisc_pkt_len(skb), q->quantum); - if (f->credit > 0) - goto out; + + /* If EDT time was provided for this skb, we need to + * update f->time_next_packet only if this qdisc enforces + * a flow max rate. + */ + if (!skb->tstamp) { + if (skb->sk) + rate = min(skb->sk->sk_pacing_rate, rate); + + if (rate <= q->low_rate_threshold) { + f->credit = 0; + } else { + plen = max(plen, q->quantum); + if (f->credit > 0) + goto out; + } } if (rate != ~0UL) { u64 len = (u64)plen * NSEC_PER_SEC; From bd85fbc2038a1bbe84990b23ff69b6fc81a32b2c Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 15 Nov 2018 18:05:13 +0200 Subject: [PATCH 40/61] net/mlx4_core: Zero out lkey field in SW2HW_MPT fw command When re-registering a user mr, the mpt information for the existing mr when running SRIOV is obtained via the QUERY_MPT fw command. The returned information includes the mpt's lkey. This retrieved mpt information is used to move the mpt back to hardware ownership in the rereg flow (via the SW2HW_MPT fw command when running SRIOV). The fw API spec states that for SW2HW_MPT, the lkey field must be zero. 
Any ConnectX-3 PF driver which checks for strict spec adherence will return failure for SW2HW_MPT if the lkey field is not zero (although the fw in practice ignores this field for SW2HW_MPT). Thus, in order to conform to the fw API spec, set the lkey field to zero before invoking SW2HW_MPT when running SRIOV. Fixes: e630664c8383 ("mlx4_core: Add helper functions to support MR re-registration") Signed-off-by: Jack Morgenstein Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/mr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 2e84f10f59ba9c..1a11bc0e16123e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, buf); + (*mpt_entry)->lkey = 0; err = mlx4_SW2HW_MPT(dev, mailbox, key); } From 3ea7e7ea53c9f6ee41cb69a29c375fe9dd9a56a7 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 15 Nov 2018 18:05:14 +0200 Subject: [PATCH 41/61] net/mlx4_core: Fix uninitialized variable compilation warning Initialize the uid variable to zero to avoid the compilation warning. Fixes: 7a89399ffad7 ("net/mlx4: Add mlx4_bitmap zone allocator") Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index deef5a998985a9..9af34e03892c19 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, int align, u32 skip_mask, u32 *puid) { - u32 uid; + u32 uid = 0; u32 res; struct mlx4_zone_allocator *zone_alloc = zone->allocator; struct mlx4_zone_entry *curr_node; From a463146e67c848cbab5ce706d6528281b7cded08 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 15 Nov 2018 18:05:15 +0200 Subject: [PATCH 42/61] net/mlx4: Fix UBSAN warning of signed integer overflow UBSAN: Undefined behavior in drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:626:29 signed integer overflow: 1802201963 + 1802201963 cannot be represented in type 'int' The union of res_reserved and res_port_rsvd[MLX4_MAX_PORTS] monitors granting of reserved resources. The grant operation is calculated and protected, thus both members of the union cannot be negative. Changed type of res_reserved and of res_port_rsvd[MLX4_MAX_PORTS] from signed int to unsigned int, allowing large value. Fixes: 5a0d0a6161ae ("mlx4: Structures and init/teardown for VF resource quotas") Signed-off-by: Aya Levin Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index ebcd2778eeb3e1..23f1b5b512c219 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -540,8 +540,8 @@ struct slave_list { struct resource_allocator { spinlock_t alloc_lock; /* protect quotas */ union { - int res_reserved; - int res_port_rsvd[MLX4_MAX_PORTS]; + unsigned int res_reserved; + unsigned int res_port_rsvd[MLX4_MAX_PORTS]; }; union { int res_free; From a97b9565338350d70d8d971c4ee6f0d4fa967418 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 15 Nov 2018 16:15:20 -0800 Subject: [PATCH 43/61] drivers/net/ethernet/qlogic/qed/qed_rdma.h: fix typo Add missing semicolon. Fixes: 291d57f67d244973 ("qed: Fix rdma_info structure allocation") Cc: Michal Kalderon Cc: Denis Bolotin Cc: David S. Miller Signed-off-by: Andrew Morton Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 50d609c0e108d8..3689fe3e593542 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -183,7 +183,7 @@ void qed_rdma_info_free(struct qed_hwfn *p_hwfn); static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} -static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL} +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;} static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {} #endif From 83e65df6dfece9eb588735459428f221eb930c0c Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Fri, 9 Nov 2018 09:17:33 +0100 Subject: [PATCH 44/61] net: mvneta: Don't advertise 2.5G modes Using 2.5G speed relies on the SerDes lanes being configured accordingly. The lanes have to be reconfigured to switch between 1G and 2.5G, and for now only the bootloader does this configuration. In the case we add a Comphy driver to handle switching the lanes dynamically, it's better for now to stick with supporting only 1G and add advertisement for 2.5G once we really are capable of handling both speeds without problem. Since the interface mode is initialy taken from the DT, we want to make sure that adding comphy support won't break boards that don't update their dtb. Fixes: da58a931f248 ("net: mvneta: Add support for 2500Mbps SGMII") Reported-by: Andrew Lunn Reported-by: Russell King Signed-off-by: Maxime Chevallier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 3ba672e9e353d2..e5397c8197b9c3 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3343,7 +3343,6 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, if (state->interface != PHY_INTERFACE_MODE_NA && state->interface != PHY_INTERFACE_MODE_QSGMII && state->interface != PHY_INTERFACE_MODE_SGMII && - state->interface != PHY_INTERFACE_MODE_2500BASEX && !phy_interface_mode_is_8023z(state->interface) && !phy_interface_mode_is_rgmii(state->interface)) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -3357,14 +3356,9 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, /* Asymmetric pause is unsupported */ phylink_set(mask, Pause); - /* We cannot use 1Gbps when using the 2.5G interface. */ - if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } else { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } + /* Half-duplex at speeds higher than 100Mbit is unsupported */ + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); if (!phy_interface_mode_is_8023z(state->interface)) { /* 10M and 100M are only supported in non-802.3z mode */ From 761f60261b4401aa368d71d431b4c218af0efcee Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 14 Nov 2018 00:48:28 +0800 Subject: [PATCH 45/61] ipv6: fix a dst leak when removing its exception These is no need to hold dst before calling rt6_remove_exception_rt(). The call to dst_hold_safe() in ip6_link_failure() was for ip6_del_rt(), which has been removed in Commit 93531c674315 ("net/ipv6: separate handling of FIB entries from dst based routes"). Otherwise, it will cause a dst leak. This patch is to simply remove the dst_hold_safe() call before calling rt6_remove_exception_rt() and also do the same in ip6_del_cached_rt(). It's safe, because the removal of the exception that holds its dst's refcnt is protected by rt6_exception_lock. Fixes: 93531c674315 ("net/ipv6: separate handling of FIB entries from dst based routes") Fixes: 23fb93a4d3f1 ("net/ipv6: Cleanup exception and cache route handling") Reported-by: Li Shuang Signed-off-by: Xin Long Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2a7423c394560c..14b422f35504f3 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2232,8 +2232,7 @@ static void ip6_link_failure(struct sk_buff *skb) if (rt) { rcu_read_lock(); if (rt->rt6i_flags & RTF_CACHE) { - if (dst_hold_safe(&rt->dst)) - rt6_remove_exception_rt(rt); + rt6_remove_exception_rt(rt); } else { struct fib6_info *from; struct fib6_node *fn; @@ -3214,8 +3213,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg) if (cfg->fc_flags & RTF_GATEWAY && !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway)) goto out; - if (dst_hold_safe(&rt->dst)) - rc = rt6_remove_exception_rt(rt); + + rc = rt6_remove_exception_rt(rt); out: return rc; } From 06bc4d0079ab6a2de86f56703ce1bd13e90b9d9d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 13 Nov 2018 18:42:24 +0100 Subject: [PATCH 46/61] net: lantiq: Fix returned value in case of error in 'xrx200_probe()' Return 'err' in the error handling path instead of 0. Return explicitly 0 in the normal path, instead of 'err', which is known to be 0 at this point. Fixes: fe1a56420cf2 ("net: lantiq: Add Lantiq / Intel VRX200 Ethernet driver") Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/lantiq_xrx200.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 8c5ba4b81fb733..2d4d10a017e59b 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -512,7 +512,8 @@ static int xrx200_probe(struct platform_device *pdev) err = register_netdev(net_dev); if (err) goto err_unprepare_clk; - return err; + + return 0; err_unprepare_clk: clk_disable_unprepare(priv->clk); @@ -520,7 +521,7 @@ static int xrx200_probe(struct platform_device *pdev) err_uninit_dma: xrx200_hw_cleanup(priv); - return 0; + return err; } static int xrx200_remove(struct platform_device *pdev) From 19ab69107d3ecfb7cd3e38ad262a881be40c01a3 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Wed, 14 Nov 2018 12:17:25 +0100 Subject: [PATCH 47/61] net/sched: act_pedit: fix memory leak when IDR allocation fails tcf_idr_check_alloc() can return a negative value, on allocation failures (-ENOMEM) or IDR exhaustion (-ENOSPC): don't leak keys_ex in these cases. Fixes: 0190c1d452a9 ("net: sched: atomically check-allocate action") Signed-off-by: Davide Caratti Acked-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index da3dd0f68cc24d..2b372a06b432ae 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, goto out_release; } } else { - return err; + ret = err; + goto out_free; } p = to_pedit(*a); From dfa0d55ff6be64e7b6881212a291cb95f8da3b08 Mon Sep 17 00:00:00 2001 From: Martin Schiller Date: Wed, 14 Nov 2018 12:54:49 +0100 Subject: [PATCH 48/61] net: phy: mdio-gpio: Fix working over slow can_sleep GPIOs This commit re-enables support for slow GPIO pins. It was initially introduced by commit 2d6c9091ab76 ("net: mdio-gpio: support access that may sleep") and got lost by commit 7e5fbd1e0700 ("net: mdio-gpio: Convert to use gpiod functions where possible"). Also add a warning about slow GPIO pins like it is done in i2c-gpio. 
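For background on why the _cansleep() accessors matter here (editor's sketch, not part of the patch): gpiod_set_value()/gpiod_get_value() may only be used on controllers that can drive the line without sleeping, while the _cansleep() variants also accept GPIOs behind I2C or SPI expanders, at the cost of being callable from process context only; that is also why a slow MDIO bit-bang bus both needs them and merits a warning. A minimal illustration, where desc is a hypothetical descriptor obtained elsewhere (e.g. via devm_gpiod_get() in probe):

#include <linux/gpio/consumer.h>

static void example_set_high(struct gpio_desc *desc)
{
	if (gpiod_cansleep(desc)) {
		/* Controller may sleep (I2C/SPI expander): process context only. */
		gpiod_set_value_cansleep(desc, 1);
	} else {
		/* Fast, memory-mapped GPIO: safe even from atomic context. */
		gpiod_set_value(desc, 1);
	}
}
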
Signed-off-by: Martin Schiller Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 33265747bf3994..3a5a24daf38436 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. */ - gpiod_set_value(bitbang->mdo, 1); + gpiod_set_value_cansleep(bitbang->mdo, 1); return; } @@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpiod_get_value(bitbang->mdio); + return gpiod_get_value_cansleep(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpiod_set_value(bitbang->mdo, what); + gpiod_set_value_cansleep(bitbang->mdo, what); else - gpiod_set_value(bitbang->mdio, what); + gpiod_set_value_cansleep(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpiod_set_value(bitbang->mdc, what); + gpiod_set_value_cansleep(bitbang->mdc, what); } static const struct mdiobb_ops mdio_gpio_ops = { @@ -162,6 +162,10 @@ static int mdio_gpio_probe(struct platform_device *pdev) if (ret) return ret; + if (gpiod_cansleep(bitbang->mdc) || gpiod_cansleep(bitbang->mdio) || + gpiod_cansleep(bitbang->mdo)) + dev_warn(&pdev->dev, "Slow GPIO pins might wreak havoc into MDIO bus timing"); + if (pdev->dev.of_node) { bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); if (bus_id < 0) { From 160396a722e0c4dfd462f3eec779251bf944f438 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 16 Nov 2018 23:04:37 -0800 Subject: [PATCH 49/61] Revert "net: phy: mdio-gpio: Fix working over slow can_sleep GPIOs" This reverts commit dfa0d55ff6be64e7b6881212a291cb95f8da3b08. Discussion still ongoing, I shouldn't have applied this. Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 3a5a24daf38436..33265747bf3994 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. 
*/ - gpiod_set_value_cansleep(bitbang->mdo, 1); + gpiod_set_value(bitbang->mdo, 1); return; } @@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpiod_get_value_cansleep(bitbang->mdio); + return gpiod_get_value(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpiod_set_value_cansleep(bitbang->mdo, what); + gpiod_set_value(bitbang->mdo, what); else - gpiod_set_value_cansleep(bitbang->mdio, what); + gpiod_set_value(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpiod_set_value_cansleep(bitbang->mdc, what); + gpiod_set_value(bitbang->mdc, what); } static const struct mdiobb_ops mdio_gpio_ops = { @@ -162,10 +162,6 @@ static int mdio_gpio_probe(struct platform_device *pdev) if (ret) return ret; - if (gpiod_cansleep(bitbang->mdc) || gpiod_cansleep(bitbang->mdio) || - gpiod_cansleep(bitbang->mdo)) - dev_warn(&pdev->dev, "Slow GPIO pins might wreak havoc into MDIO bus timing"); - if (pdev->dev.of_node) { bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); if (bus_id < 0) { From df5a8ec64eed7fe45b556cfff503acd6429ab817 Mon Sep 17 00:00:00 2001 From: Martin Schiller Date: Fri, 16 Nov 2018 08:38:36 +0100 Subject: [PATCH 50/61] net: phy: mdio-gpio: Fix working over slow can_sleep GPIOs Up until commit 7e5fbd1e0700 ("net: mdio-gpio: Convert to use gpiod functions where possible"), the _cansleep variants of the gpio_ API was used. After that commit and the change to gpiod_ API, the _cansleep() was dropped. This then results in WARN_ON() when used with GPIO devices which do sleep. Add back the _cansleep() to avoid this. Fixes: 7e5fbd1e0700 ("net: mdio-gpio: Convert to use gpiod functions where possible") Signed-off-by: Martin Schiller Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 33265747bf3994..0fbcedcdf6e2ae 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) * assume the pin serves as pull-up. If direction is * output, the default value is high. 
*/ - gpiod_set_value(bitbang->mdo, 1); + gpiod_set_value_cansleep(bitbang->mdo, 1); return; } @@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpiod_get_value(bitbang->mdio); + return gpiod_get_value_cansleep(bitbang->mdio); } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) container_of(ctrl, struct mdio_gpio_info, ctrl); if (bitbang->mdo) - gpiod_set_value(bitbang->mdo, what); + gpiod_set_value_cansleep(bitbang->mdo, what); else - gpiod_set_value(bitbang->mdio, what); + gpiod_set_value_cansleep(bitbang->mdio, what); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpiod_set_value(bitbang->mdc, what); + gpiod_set_value_cansleep(bitbang->mdc, what); } static const struct mdiobb_ops mdio_gpio_ops = { From 95506588d2c1d72ca29adef8ae9bf771bcfb4ced Mon Sep 17 00:00:00 2001 From: Slavomir Kaslev Date: Fri, 16 Nov 2018 11:27:53 +0200 Subject: [PATCH 51/61] socket: do a generic_file_splice_read when proto_ops has no splice_read splice(2) fails with -EINVAL when called reading on a socket with no splice_read set in its proto_ops (such as vsock sockets). Switch this to fallbacks to a generic_file_splice_read instead. Signed-off-by: Slavomir Kaslev Signed-off-by: David S. Miller --- net/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/socket.c b/net/socket.c index 593826e11a5376..334fcc617ef273 100644 --- a/net/socket.c +++ b/net/socket.c @@ -853,7 +853,7 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct socket *sock = file->private_data; if (unlikely(!sock->ops->splice_read)) - return -EINVAL; + return generic_file_splice_read(file, ppos, pipe, len, flags); return sock->ops->splice_read(sock, ppos, pipe, len, flags); } From 9d332e69c1dc74dcd748de7cbd2dac5c61bda265 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Fri, 16 Nov 2018 18:50:01 +0200 Subject: [PATCH 52/61] net: bridge: fix vlan stats use-after-free on destruction Syzbot reported a use-after-free of the global vlan context on port vlan destruction. When I added per-port vlan stats I missed the fact that the global vlan context can be freed before the per-port vlan rcu callback. There're a few different ways to deal with this, I've chosen to add a new private flag that is set only when per-port stats are allocated so we can directly check it on destruction without dereferencing the global context at all. The new field in net_bridge_vlan uses a hole. v2: cosmetic change, move the check to br_process_vlan_info where the other checks are done v3: add change log in the patch, add private (in-kernel only) flags in a hole in net_bridge_vlan struct and use that instead of mixing user-space flags with private flags Fixes: 9163a0fc1f0c ("net: bridge: add support for per-port vlan stats") Reported-by: syzbot+04681da557a0e49a52e5@syzkaller.appspotmail.com Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_private.h | 7 +++++++ net/bridge/br_vlan.c | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 2920e06a540329..04c19a37e500e8 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -102,12 +102,18 @@ struct br_tunnel_info { struct metadata_dst *tunnel_dst; }; +/* private vlan flags */ +enum { + BR_VLFLAG_PER_PORT_STATS = BIT(0), +}; + /** * struct net_bridge_vlan - per-vlan entry * * @vnode: rhashtable member * @vid: VLAN id * @flags: bridge vlan flags + * @priv_flags: private (in-kernel) bridge vlan flags * @stats: per-cpu VLAN statistics * @br: if MASTER flag set, this points to a bridge struct * @port: if MASTER flag unset, this points to a port struct @@ -127,6 +133,7 @@ struct net_bridge_vlan { struct rhash_head tnode; u16 vid; u16 flags; + u16 priv_flags; struct br_vlan_stats __percpu *stats; union { struct net_bridge *br; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 8c9297a019475f..e84be08b828549 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -197,7 +197,7 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu) v = container_of(rcu, struct net_bridge_vlan, rcu); WARN_ON(br_vlan_is_master(v)); /* if we had per-port stats configured then free them here */ - if (v->brvlan->stats != v->stats) + if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS) free_percpu(v->stats); v->stats = NULL; kfree(v); @@ -264,6 +264,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags) err = -ENOMEM; goto out_filt; } + v->priv_flags |= BR_VLFLAG_PER_PORT_STATS; } else { v->stats = masterv->stats; } From 8840c3e2344a456267d7989b97d097e798b28b0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Fri, 16 Nov 2018 12:13:59 -0800 Subject: [PATCH 53/61] MAINTAINERS: Add entry for CAKE qdisc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We would like the existing community to be kept in the loop for any new developments on CAKE; and I certainly plan to keep maintaining it. Reflect this in MAINTAINERS. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. Miller --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 5a4bd37d9d02ab..99f2956be87b80 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3276,6 +3276,12 @@ F: include/uapi/linux/caif/ F: include/net/caif/ F: net/caif/ +CAKE QDISC +M: Toke Høiland-Jørgensen +L: cake@lists.bufferbloat.net (moderated for non-subscribers) +S: Maintained +F: net/sched/sch_cake.c + CALGARY x86-64 IOMMU M: Muli Ben-Yehuda M: Jon Mason From 16f7eb2b77b55da816c4e207f3f9440a8cafc00a Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 16 Nov 2018 16:58:19 +0100 Subject: [PATCH 54/61] ip_tunnel: don't force DF when MTU is locked The various types of tunnels running over IPv4 can ask to set the DF bit to do PMTU discovery. However, PMTU discovery is subject to the threshold set by the net.ipv4.route.min_pmtu sysctl, and is also disabled on routes with "mtu lock". In those cases, we shouldn't set the DF bit. This patch makes setting the DF bit conditional on the route's MTU locking state. This issue seems to be older than git history. Signed-off-by: Sabrina Dubroca Reviewed-by: Stefano Brivio Signed-off-by: David S. 
Miller --- net/ipv4/ip_tunnel_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index dde671e978298b..c248e0dccbe17a 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, iph->version = 4; iph->ihl = sizeof(struct iphdr) >> 2; - iph->frag_off = df; + iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df; iph->protocol = proto; iph->tos = tos; iph->daddr = dst; From 5aaf6428526bcad98d6f51f2f679c919bb75d7e9 Mon Sep 17 00:00:00 2001 From: Lucas Bates Date: Fri, 16 Nov 2018 17:37:55 -0500 Subject: [PATCH 55/61] tc-testing: tdc.py: ignore errors when decoding stdout/stderr Prevent exceptions from being raised while decoding output from an executed command. There is no impact on tdc's execution and the verify command phase would fail the pattern match. Signed-off-by: Lucas Bates Signed-off-by: David S. Miller --- tools/testing/selftests/tc-testing/tdc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index 87a04a8a5945c2..9b3f414ff1e9aa 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py @@ -134,9 +134,9 @@ def exec_cmd(args, pm, stage, command): (rawout, serr) = proc.communicate() if proc.returncode != 0 and len(serr) > 0: - foutput = serr.decode("utf-8") + foutput = serr.decode("utf-8", errors="ignore") else: - foutput = rawout.decode("utf-8") + foutput = rawout.decode("utf-8", errors="ignore") proc.stdout.close() proc.stderr.close() From c6cecf4ae44e4ce9158ef8806358142c3512cd33 Mon Sep 17 00:00:00 2001 From: "Brenda J. Butler" Date: Fri, 16 Nov 2018 17:37:56 -0500 Subject: [PATCH 56/61] tc-testing: tdc.py: Guard against lack of returncode in executed command Add some defensive coding in case one of the subprocesses created by tdc returns nothing. If no object is returned from exec_cmd, then tdc will halt with an unhandled exception. Signed-off-by: Brenda J. Butler Signed-off-by: Lucas Bates Signed-off-by: David S. 
Miller --- tools/testing/selftests/tc-testing/tdc.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index 9b3f414ff1e9aa..7607ba3e3cbe4e 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py @@ -169,6 +169,8 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None): file=sys.stderr) print("\n{} *** Error message: \"{}\"".format(prefix, foutput), file=sys.stderr) + print("returncode {}; expected {}".format(proc.returncode, + exit_codes)) print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr) print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr) print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr) @@ -195,12 +197,18 @@ def run_one_test(pm, args, index, tidx): print('-----> execute stage') pm.call_pre_execute() (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"]) - exit_code = p.returncode + if p: + exit_code = p.returncode + else: + exit_code = None + pm.call_post_execute() - if (exit_code != int(tidx["expExitCode"])): + if (exit_code is None or exit_code != int(tidx["expExitCode"])): result = False - print("exit:", exit_code, int(tidx["expExitCode"])) + print("exit: {!r}".format(exit_code)) + print("exit: {}".format(int(tidx["expExitCode"]))) + #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"]))) print(procout) else: if args.verbose > 0: From 33d9a2c72f086cbf1087b2fd2d1a15aa9df14a7f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 17 Nov 2018 21:57:02 -0800 Subject: [PATCH 57/61] net-gro: reset skb->pkt_type in napi_reuse_skb() eth_type_trans() assumes the initial value of skb->pkt_type is PACKET_HOST. This is indeed the value right after a fresh skb allocation. However, it is possible that GRO merged a packet with a different value (like PACKET_OTHERHOST in case macvlan is used), so we need to make sure napi->skb will have pkt_type set back to PACKET_HOST. Otherwise, valid packets might be dropped by the stack because their pkt_type is not PACKET_HOST. napi_reuse_skb() was added in commit 96e93eab2033 ("gro: Add internal interfaces for VLAN"), but this bug has always been there. Fixes: 96e93eab2033 ("gro: Add internal interfaces for VLAN") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index 0ffcbdd55fa9ee..066aa902d85c3e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5655,6 +5655,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb->vlan_tci = 0; skb->dev = napi->dev; skb->skb_iif = 0; + + /* eth_type_trans() assumes pkt_type is PACKET_HOST */ + skb->pkt_type = PACKET_HOST; + skb->encapsulation = 0; skb_shinfo(skb)->gso_type = 0; skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); From adba75be0d23cca92a028749d92c60c8909bbdb3 Mon Sep 17 00:00:00 2001 From: Jon Maloy Date: Fri, 16 Nov 2018 16:55:04 -0500 Subject: [PATCH 58/61] tipc: fix lockdep warning when reinitializing sockets We get the following warning: [ 47.926140] 32-bit node address hash set to 2010a0a [ 47.927202] [ 47.927433] ================================ [ 47.928050] WARNING: inconsistent lock state [ 47.928661] 4.19.0+ #37 Tainted: G E [ 47.929346] -------------------------------- [ 47.929954] inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
[ 47.930116] swapper/3/0 [HC0[0]:SC1[3]:HE1:SE0] takes: [ 47.930116] 00000000af8bc31e (&(&ht->lock)->rlock){+.?.}, at: rhashtable_walk_enter+0x36/0xb0 [ 47.930116] {SOFTIRQ-ON-W} state was registered at: [ 47.930116] _raw_spin_lock+0x29/0x60 [ 47.930116] rht_deferred_worker+0x556/0x810 [ 47.930116] process_one_work+0x1f5/0x540 [ 47.930116] worker_thread+0x64/0x3e0 [ 47.930116] kthread+0x112/0x150 [ 47.930116] ret_from_fork+0x3a/0x50 [ 47.930116] irq event stamp: 14044 [ 47.930116] hardirqs last enabled at (14044): [] __local_bh_enable_ip+0x7a/0xf0 [ 47.938117] hardirqs last disabled at (14043): [] __local_bh_enable_ip+0x41/0xf0 [ 47.938117] softirqs last enabled at (14028): [] irq_enter+0x5e/0x60 [ 47.938117] softirqs last disabled at (14029): [] irq_exit+0xb5/0xc0 [ 47.938117] [ 47.938117] other info that might help us debug this: [ 47.938117] Possible unsafe locking scenario: [ 47.938117] [ 47.938117] CPU0 [ 47.938117] ---- [ 47.938117] lock(&(&ht->lock)->rlock); [ 47.938117] [ 47.938117] lock(&(&ht->lock)->rlock); [ 47.938117] [ 47.938117] *** DEADLOCK *** [ 47.938117] [ 47.938117] 2 locks held by swapper/3/0: [ 47.938117] #0: 0000000062c64f90 ((&d->timer)){+.-.}, at: call_timer_fn+0x5/0x280 [ 47.938117] #1: 00000000ee39619c (&(&d->lock)->rlock){+.-.}, at: tipc_disc_timeout+0xc8/0x540 [tipc] [ 47.938117] [ 47.938117] stack backtrace: [ 47.938117] CPU: 3 PID: 0 Comm: swapper/3 Tainted: G E 4.19.0+ #37 [ 47.938117] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 [ 47.938117] Call Trace: [ 47.938117] [ 47.938117] dump_stack+0x5e/0x8b [ 47.938117] print_usage_bug+0x1ed/0x1ff [ 47.938117] mark_lock+0x5b5/0x630 [ 47.938117] __lock_acquire+0x4c0/0x18f0 [ 47.938117] ? lock_acquire+0xa6/0x180 [ 47.938117] lock_acquire+0xa6/0x180 [ 47.938117] ? rhashtable_walk_enter+0x36/0xb0 [ 47.938117] _raw_spin_lock+0x29/0x60 [ 47.938117] ? rhashtable_walk_enter+0x36/0xb0 [ 47.938117] rhashtable_walk_enter+0x36/0xb0 [ 47.938117] tipc_sk_reinit+0xb0/0x410 [tipc] [ 47.938117] ? mark_held_locks+0x6f/0x90 [ 47.938117] ? __local_bh_enable_ip+0x7a/0xf0 [ 47.938117] ? lockdep_hardirqs_on+0x20/0x1a0 [ 47.938117] tipc_net_finalize+0xbf/0x180 [tipc] [ 47.938117] tipc_disc_timeout+0x509/0x540 [tipc] [ 47.938117] ? call_timer_fn+0x5/0x280 [ 47.938117] ? tipc_disc_msg_xmit.isra.19+0xa0/0xa0 [tipc] [ 47.938117] ? tipc_disc_msg_xmit.isra.19+0xa0/0xa0 [tipc] [ 47.938117] call_timer_fn+0xa1/0x280 [ 47.938117] ? tipc_disc_msg_xmit.isra.19+0xa0/0xa0 [tipc] [ 47.938117] run_timer_softirq+0x1f2/0x4d0 [ 47.938117] __do_softirq+0xfc/0x413 [ 47.938117] irq_exit+0xb5/0xc0 [ 47.938117] smp_apic_timer_interrupt+0xac/0x210 [ 47.938117] apic_timer_interrupt+0xf/0x20 [ 47.938117] [ 47.938117] RIP: 0010:default_idle+0x1c/0x140 [ 47.938117] Code: 90 90 90 90 90 90 90 90 90 90 90 90 90 90 0f 1f 44 00 00 41 54 55 53 65 8b 2d d8 2b 74 65 0f 1f 44 00 00 e8 c6 2c 8b ff fb f4 <65> 8b 2d c5 2b 74 65 0f 1f 44 00 00 5b 5d 41 5c c3 65 8b 05 b4 2b [ 47.938117] RSP: 0018:ffffaf6ac0207ec8 EFLAGS: 00000206 ORIG_RAX: ffffffffffffff13 [ 47.938117] RAX: ffff8f5b3735e200 RBX: 0000000000000003 RCX: 0000000000000001 [ 47.938117] RDX: 0000000000000001 RSI: 0000000000000001 RDI: ffff8f5b3735e200 [ 47.938117] RBP: 0000000000000003 R08: 0000000000000001 R09: 0000000000000000 [ 47.938117] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 [ 47.938117] R13: 0000000000000000 R14: ffff8f5b3735e200 R15: ffff8f5b3735e200 [ 47.938117] ? 
default_idle+0x1a/0x140 [ 47.938117] do_idle+0x1bc/0x280 [ 47.938117] cpu_startup_entry+0x19/0x20 [ 47.938117] start_secondary+0x187/0x1c0 [ 47.938117] secondary_startup_64+0xa4/0xb0 The reason seems to be that tipc_net_finalize()->tipc_sk_reinit() is calling the function rhashtable_walk_enter() within a timer interrupt. We fix this by executing tipc_net_finalize() in work queue context. Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/discover.c | 19 ++++++++++--------- net/tipc/net.c | 45 +++++++++++++++++++++++++++++++++++++-------- net/tipc/net.h | 2 +- 3 files changed, 48 insertions(+), 18 deletions(-) diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 2830709957bdde..c138d68e8a695f 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, /* Apply trial address if we just left trial period */ if (!trial && !self) { - tipc_net_finalize(net, tn->trial_addr); + tipc_sched_net_finalize(net, tn->trial_addr); + msg_set_prevnode(buf_msg(d->skb), tn->trial_addr); msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); } @@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t) goto exit; } - /* Trial period over ? */ - if (!time_before(jiffies, tn->addr_trial_end)) { - /* Did we just leave it ? */ - if (!tipc_own_addr(net)) - tipc_net_finalize(net, tn->trial_addr); - - msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); - msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net)); + /* Did we just leave trial period ? */ + if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) { + mod_timer(&d->timer, jiffies + TIPC_DISC_INIT); + spin_unlock_bh(&d->lock); + tipc_sched_net_finalize(net, tn->trial_addr); + return; } /* Adjust timeout interval according to discovery phase */ @@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t) d->timer_intv = TIPC_DISC_SLOW; else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST) d->timer_intv = TIPC_DISC_FAST; + msg_set_type(buf_msg(d->skb), DSC_REQ_MSG); + msg_set_prevnode(buf_msg(d->skb), tn->trial_addr); } mod_timer(&d->timer, jiffies + d->timer_intv); diff --git a/net/tipc/net.c b/net/tipc/net.c index 62199cf5a56c04..f076edb7433824 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -104,6 +104,14 @@ * - A local spin_lock protecting the queue of subscriber events. 
*/ +struct tipc_net_work { + struct work_struct work; + struct net *net; + u32 addr; +}; + +static void tipc_net_finalize(struct net *net, u32 addr); + int tipc_net_init(struct net *net, u8 *node_id, u32 addr) { if (tipc_own_id(net)) { @@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr) return 0; } -void tipc_net_finalize(struct net *net, u32 addr) +static void tipc_net_finalize(struct net *net, u32 addr) { struct tipc_net *tn = tipc_net(net); - if (!cmpxchg(&tn->node_addr, 0, addr)) { - tipc_set_node_addr(net, addr); - tipc_named_reinit(net); - tipc_sk_reinit(net); - tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, - TIPC_CLUSTER_SCOPE, 0, addr); - } + if (cmpxchg(&tn->node_addr, 0, addr)) + return; + tipc_set_node_addr(net, addr); + tipc_named_reinit(net); + tipc_sk_reinit(net); + tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, + TIPC_CLUSTER_SCOPE, 0, addr); +} + +static void tipc_net_finalize_work(struct work_struct *work) +{ + struct tipc_net_work *fwork; + + fwork = container_of(work, struct tipc_net_work, work); + tipc_net_finalize(fwork->net, fwork->addr); + kfree(fwork); +} + +void tipc_sched_net_finalize(struct net *net, u32 addr) +{ + struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC); + + if (!fwork) + return; + INIT_WORK(&fwork->work, tipc_net_finalize_work); + fwork->net = net; + fwork->addr = addr; + schedule_work(&fwork->work); } void tipc_net_stop(struct net *net) diff --git a/net/tipc/net.h b/net/tipc/net.h index 09ad02b50bb1ba..b7f2e364eb99e7 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h @@ -42,7 +42,7 @@ extern const struct nla_policy tipc_nl_net_policy[]; int tipc_net_init(struct net *net, u8 *node_id, u32 addr); -void tipc_net_finalize(struct net *net, u32 addr); +void tipc_sched_net_finalize(struct net *net, u32 addr); void tipc_net_stop(struct net *net); int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); From 1c1274a56999fbdf9cf84e332b28448bb2d55221 Mon Sep 17 00:00:00 2001 From: Jon Maloy Date: Sat, 17 Nov 2018 12:17:06 -0500 Subject: [PATCH 59/61] tipc: don't assume linear buffer when reading ancillary data The code for reading ancillary data from a received buffer is assuming the buffer is linear. To make this assumption true we have to linearize the buffer before message data is read. Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/socket.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 636e6131769d83..b57b1be7252bae 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1555,16 +1555,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb) /** * tipc_sk_anc_data_recv - optionally capture ancillary data for received message * @m: descriptor for message info - * @msg: received message header + * @skb: received message buffer * @tsk: TIPC port associated with message * * Note: Ancillary data is not captured if not requested by receiver. 
* * Returns 0 if successful, otherwise errno */ -static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, +static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb, struct tipc_sock *tsk) { + struct tipc_msg *msg; u32 anc_data[3]; u32 err; u32 dest_type; @@ -1573,6 +1574,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, if (likely(m->msg_controllen == 0)) return 0; + msg = buf_msg(skb); /* Optionally capture errored message object(s) */ err = msg ? msg_errcode(msg) : 0; @@ -1583,6 +1585,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, if (res) return res; if (anc_data[1]) { + if (skb_linearize(skb)) + return -ENOMEM; + msg = buf_msg(skb); res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], msg_data(msg)); if (res) @@ -1744,9 +1749,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m, /* Collect msg meta data, including error code and rejected data */ tipc_sk_set_orig_addr(m, skb); - rc = tipc_sk_anc_data_recv(m, hdr, tsk); + rc = tipc_sk_anc_data_recv(m, skb, tsk); if (unlikely(rc)) goto exit; + hdr = buf_msg(skb); /* Capture data if non-error msg, otherwise just set return value */ if (likely(!err)) { @@ -1856,9 +1862,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m, /* Collect msg meta data, incl. error code and rejected data */ if (!copied) { tipc_sk_set_orig_addr(m, skb); - rc = tipc_sk_anc_data_recv(m, hdr, tsk); + rc = tipc_sk_anc_data_recv(m, skb, tsk); if (rc) break; + hdr = buf_msg(skb); } /* Copy data if msg ok, otherwise return error/partial data */ From 7ddacfa564870cdd97275fd87decb6174abc6380 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sun, 18 Nov 2018 10:45:30 -0800 Subject: [PATCH 60/61] ipv6: Fix PMTU updates for UDP/raw sockets in presence of VRF Preethi reported that PMTU discovery for UDP/raw applications is not working in the presence of VRF when the socket is not bound to a device. The problem is that ip6_sk_update_pmtu does not consider the L3 domain of the skb device if the socket is not bound. Update the function to set oif to the L3 master device if relevant. Fixes: ca254490c8df ("net: Add VRF support to IPv6 stack") Reported-by: Preethi Ramachandra Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/route.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 14b422f35504f3..059f0531f7c1c8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2359,10 +2359,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu); void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) { + int oif = sk->sk_bound_dev_if; struct dst_entry *dst; - ip6_update_pmtu(skb, sock_net(sk), mtu, - sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid); + if (!oif && skb->dev) + oif = l3mdev_master_ifindex(skb->dev); + + ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid); dst = __sk_dst_get(sk); if (!dst || !dst->obsolete || From 8ebebcba559a1bfbaec7bbda64feb9870b9c58da Mon Sep 17 00:00:00 2001 From: Matthew Cover Date: Sun, 18 Nov 2018 00:46:00 -0700 Subject: [PATCH 61/61] tuntap: fix multiqueue rx When writing packets to a descriptor associated with a combined queue, the packets should end up on that queue. Before this change all packets written to any descriptor associated with a tap interface end up on rx-0, even when the descriptor is associated with a different queue. The rx traffic can be generated by either of the following. 1. 
a simple tap program which spins up multiple queues and writes packets to each of the file descriptors 2. tx from a qemu vm with a tap multiqueue netdev The queue for rx traffic can be observed by either of the following (done on the hypervisor in the qemu case). 1. a simple netmap program which opens and reads from per-queue descriptors 2. configuring RPS and doing per-cpu captures with rxtxcpu Alternatively, if you printk() the return value of skb_get_rx_queue() just before each instance of netif_receive_skb() in tun.c, you will get 65535 for every skb. Calling skb_record_rx_queue() to set the rx queue to the queue_index fixes the association between descriptor and rx queue. Signed-off-by: Matthew Cover Signed-off-by: David S. Miller --- drivers/net/tun.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 060135ceaf0e1a..e244f5d7512a6e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1536,6 +1536,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, if (!rx_batched || (!more && skb_queue_empty(queue))) { local_bh_disable(); + skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); return; @@ -1555,8 +1556,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *nskb; local_bh_disable(); - while ((nskb = __skb_dequeue(&process_queue))) + while ((nskb = __skb_dequeue(&process_queue))) { + skb_record_rx_queue(nskb, tfile->queue_index); netif_receive_skb(nskb); + } + skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); } @@ -2451,6 +2455,7 @@ static int tun_xdp_one(struct tun_struct *tun, if (!rcu_dereference(tun->steering_prog)) rxhash = __skb_get_hash_symmetric(skb); + skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); stats = get_cpu_ptr(tun->pcpu_stats);
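
As an illustration of the reproduction setup described in the tuntap commit message above: the "simple tap program" amounts to attaching several file descriptors to one tap device with IFF_MULTI_QUEUE and writing a test frame on each. The sketch below is illustrative only and is not part of the patch; the device name "tap0", the queue count, and the error handling are assumptions, while TUNSETIFF, IFF_TAP, IFF_NO_PI and IFF_MULTI_QUEUE are the standard tuntap interface.

/*
 * Minimal multiqueue-tap reproducer sketch: each open() + TUNSETIFF with
 * IFF_MULTI_QUEUE attaches one more queue to the same tap device, and frames
 * written to fds[i] should show up on rx queue i once skb_record_rx_queue()
 * is called as in the patch above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tap_open_queue(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	/* IFF_MULTI_QUEUE allows several descriptors on one tap device */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int fds[4];
	int i;

	for (i = 0; i < 4; i++) {
		fds[i] = tap_open_queue("tap0");	/* "tap0" is an example name */
		if (fds[i] < 0) {
			perror("tap_open_queue");
			return 1;
		}
	}
	/* write one test frame to each fds[i] and observe which rx queue it lands on */
	for (i = 0; i < 4; i++)
		close(fds[i]);
	return 0;
}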